mirror of
https://github.com/SigNoz/signoz.git
synced 2026-04-22 03:40:29 +01:00
Compare commits
48 Commits
refactor/r
...
tests/unif
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8af041a345 | ||
|
|
52cf194111 | ||
|
|
f80b899dbe | ||
|
|
d2898efce8 | ||
|
|
c999a2f44f | ||
|
|
c3a889d7e1 | ||
|
|
c35ff1d4d5 | ||
|
|
40d429bc89 | ||
|
|
8a58db9bda | ||
|
|
fe735b66b7 | ||
|
|
887d5c47b2 | ||
|
|
e0778973ac | ||
|
|
9f2269fee6 | ||
|
|
6481b660ee | ||
|
|
de0396d5bd | ||
|
|
49ef953b15 | ||
|
|
24513f305d | ||
|
|
34d36ecd2c | ||
|
|
0e13757719 | ||
|
|
8633b3d358 | ||
|
|
c4bde774e1 | ||
|
|
acff718113 | ||
|
|
8d4122df22 | ||
|
|
138b0cd606 | ||
|
|
c17e54ad01 | ||
|
|
51581160eb | ||
|
|
7959e9eadd | ||
|
|
a6faab083f | ||
|
|
d43c2bb4d7 | ||
|
|
68c8504ac7 | ||
|
|
523dcd6219 | ||
|
|
5ebe95e3d6 | ||
|
|
527963b7f4 | ||
|
|
afcc02882d | ||
|
|
f4748f7088 | ||
|
|
36d766d3d9 | ||
|
|
96188a38b4 | ||
|
|
8cfa3bbe94 | ||
|
|
0d97f543df | ||
|
|
be7099b2b4 | ||
|
|
ab6e8291fe | ||
|
|
0839c532bc | ||
|
|
5ef206a666 | ||
|
|
fce92115a9 | ||
|
|
9743002edf | ||
|
|
0efde7b5ce | ||
|
|
8bdaecbe25 | ||
|
|
deb90abd9c |
10
.github/workflows/integrationci.yaml
vendored
10
.github/workflows/integrationci.yaml
vendored
@@ -25,11 +25,11 @@ jobs:
|
||||
uses: astral-sh/setup-uv@v4
|
||||
- name: install
|
||||
run: |
|
||||
cd tests/integration && uv sync
|
||||
cd tests && uv sync
|
||||
- name: fmt
|
||||
run: |
|
||||
make py-fmt
|
||||
git diff --exit-code -- tests/integration/
|
||||
git diff --exit-code -- tests/
|
||||
- name: lint
|
||||
run: |
|
||||
make py-lint
|
||||
@@ -79,7 +79,7 @@ jobs:
|
||||
uses: astral-sh/setup-uv@v4
|
||||
- name: install
|
||||
run: |
|
||||
cd tests/integration && uv sync
|
||||
cd tests && uv sync
|
||||
- name: webdriver
|
||||
run: |
|
||||
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
||||
@@ -99,10 +99,10 @@ jobs:
|
||||
google-chrome-stable --version
|
||||
- name: run
|
||||
run: |
|
||||
cd tests/integration && \
|
||||
cd tests && \
|
||||
uv run pytest \
|
||||
--basetemp=./tmp/ \
|
||||
src/${{matrix.src}} \
|
||||
integration/src/${{matrix.src}} \
|
||||
--sqlstore-provider ${{matrix.sqlstore-provider}} \
|
||||
--sqlite-mode ${{matrix.sqlite-mode}} \
|
||||
--postgres-version ${{matrix.postgres-version}} \
|
||||
|
||||
22
Makefile
22
Makefile
@@ -201,26 +201,26 @@ docker-buildx-enterprise: go-build-enterprise js-build
|
||||
# python commands
|
||||
##############################################################
|
||||
.PHONY: py-fmt
|
||||
py-fmt: ## Run black for integration tests
|
||||
@cd tests/integration && uv run black .
|
||||
py-fmt: ## Run black across the shared tests project
|
||||
@cd tests && uv run black .
|
||||
|
||||
.PHONY: py-lint
|
||||
py-lint: ## Run lint for integration tests
|
||||
@cd tests/integration && uv run isort .
|
||||
@cd tests/integration && uv run autoflake .
|
||||
@cd tests/integration && uv run pylint .
|
||||
py-lint: ## Run lint across the shared tests project
|
||||
@cd tests && uv run isort .
|
||||
@cd tests && uv run autoflake .
|
||||
@cd tests && uv run pylint .
|
||||
|
||||
.PHONY: py-test-setup
|
||||
py-test-setup: ## Runs integration tests
|
||||
@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --reuse --capture=no src/bootstrap/setup.py::test_setup
|
||||
py-test-setup: ## Bring up the shared SigNoz backend used by integration and e2e tests
|
||||
@cd tests && uv run pytest --basetemp=./tmp/ -vv --reuse --capture=no integration/bootstrap/setup.py::test_setup
|
||||
|
||||
.PHONY: py-test-teardown
|
||||
py-test-teardown: ## Runs integration tests with teardown
|
||||
@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --teardown --capture=no src/bootstrap/setup.py::test_teardown
|
||||
py-test-teardown: ## Tear down the shared SigNoz backend
|
||||
@cd tests && uv run pytest --basetemp=./tmp/ -vv --teardown --capture=no integration/bootstrap/setup.py::test_teardown
|
||||
|
||||
.PHONY: py-test
|
||||
py-test: ## Runs integration tests
|
||||
@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --capture=no src/
|
||||
@cd tests && uv run pytest --basetemp=./tmp/ -vv --capture=no integration/tests/
|
||||
|
||||
.PHONY: py-clean
|
||||
py-clean: ## Clear all pycache and pytest cache from tests directory recursively
|
||||
|
||||
171
docs/contributing/tests/e2e.md
Normal file
171
docs/contributing/tests/e2e.md
Normal file
@@ -0,0 +1,171 @@
|
||||
# E2E tests
|
||||
|
||||
Playwright-based end-to-end suite for the SigNoz frontend. Wired into the
|
||||
shared pytest project at `tests/` — pytest fixtures bring up a containerized
|
||||
backend (ClickHouse + Postgres + migrator + SigNoz-with-web), register an
|
||||
admin, and seed dashboards/alerts/telemetry before Playwright runs.
|
||||
|
||||
Source lives at `tests/e2e/`.
|
||||
|
||||
## Layout
|
||||
|
||||
```
|
||||
tests/e2e/
|
||||
bootstrap/
|
||||
setup.py Brings backend + seeder up; writes .env.local
|
||||
run.py One-command entrypoint: subprocesses `yarn test`
|
||||
tests/ Playwright .spec.ts files (per-feature dirs)
|
||||
fixtures/auth.ts authedPage Playwright fixture + ensureLoggedIn helper
|
||||
playwright.config.ts Loads .env (user) + .env.local (generated) via dotenv
|
||||
```
|
||||
|
||||
Each spec owns its own data. Telemetry goes through the seeder
|
||||
(`tests/seeder/`, exposing `/telemetry/{traces,logs,metrics}` POST+DELETE);
|
||||
dashboards, alert rules, and org config go through the SigNoz REST API
|
||||
directly from the spec. No global pre-seeding fixtures.
|
||||
|
||||
## Running
|
||||
|
||||
### One-command local run
|
||||
|
||||
Pytest owns the lifecycle: provisions containers, registers the admin,
|
||||
starts the seeder, writes backend coordinates to `tests/e2e/.env.local`
|
||||
(loaded by `playwright.config.ts` via dotenv), then shells out to
|
||||
`yarn test`:
|
||||
|
||||
```bash
|
||||
cd signoz/tests
|
||||
uv sync # first time only
|
||||
uv run pytest --basetemp=./tmp/ -vv --with-web \
|
||||
e2e/bootstrap/run.py::test_e2e
|
||||
```
|
||||
|
||||
### Iterative Playwright development
|
||||
|
||||
Bring the backend up once (`--reuse` keeps containers warm), then drive
|
||||
Playwright directly:
|
||||
|
||||
```bash
|
||||
cd signoz/tests
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse --with-web \
|
||||
e2e/bootstrap/setup.py::test_setup
|
||||
|
||||
cd e2e
|
||||
yarn install && yarn install:browsers # first time
|
||||
yarn test # headless
|
||||
yarn test:ui # interactive
|
||||
yarn test:headed # headed
|
||||
yarn test:debug # step-through
|
||||
yarn test tests/roles/roles-listing.spec.ts # single file
|
||||
```
|
||||
|
||||
Teardown:
|
||||
|
||||
```bash
|
||||
cd signoz/tests
|
||||
uv run pytest --basetemp=./tmp/ -vv --teardown \
|
||||
e2e/bootstrap/setup.py::test_teardown
|
||||
```
|
||||
|
||||
### Staging fallback
|
||||
|
||||
Point `SIGNOZ_E2E_BASE_URL` at a remote env via `.env` — no local
|
||||
backend bring-up, no `.env.local` generated, Playwright hits the URL
|
||||
directly:
|
||||
|
||||
```bash
|
||||
cp .env.example .env # fill SIGNOZ_E2E_USERNAME / PASSWORD
|
||||
yarn test:staging
|
||||
```
|
||||
|
||||
### Environment variables
|
||||
|
||||
| Variable | Description |
|
||||
|---|---|
|
||||
| `SIGNOZ_E2E_BASE_URL` | Base URL (staging mode) |
|
||||
| `SIGNOZ_E2E_USERNAME` | Test user email (staging mode) |
|
||||
| `SIGNOZ_E2E_PASSWORD` | Test user password (staging mode) |
|
||||
|
||||
## Writing tests
|
||||
|
||||
```typescript
|
||||
import { expect, test } from '@playwright/test';
|
||||
import { ensureLoggedIn } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Feature name', () => {
|
||||
test.beforeEach(async ({ page }) => {
|
||||
await ensureLoggedIn(page);
|
||||
await page.goto('/feature');
|
||||
});
|
||||
|
||||
test('Test name', async ({ page }) => {
|
||||
// steps
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Locator priority
|
||||
|
||||
1. `getByRole('button', { name: 'Submit' })`
|
||||
2. `getByLabel('Email')`
|
||||
3. `getByPlaceholder('...')`
|
||||
4. `getByText('...')`
|
||||
5. `getByTestId('...')`
|
||||
6. `locator('.ant-select')` — last resort (Ant Design dropdowns often have
|
||||
no semantic alternative)
|
||||
|
||||
### Conventions
|
||||
|
||||
- Unique test data: `` const name = `Test ${Date.now()}`; ``
|
||||
- Prefer explicit waits over `page.waitForTimeout(ms)`:
|
||||
```typescript
|
||||
await expect(page.getByRole('dialog')).toBeVisible(); // good
|
||||
await page.waitForTimeout(5000); // avoid
|
||||
```
|
||||
- Never commit `test.only` or untagged tests.
|
||||
|
||||
## AI-assisted test authoring (optional)
|
||||
|
||||
Playwright's `init-agents` workflow is wired up for Claude Code and VS Code
|
||||
Copilot. Agents live in `tests/e2e/.claude/agents/` and
|
||||
`.github/chatmodes/` respectively. Re-run after each Playwright version
|
||||
upgrade:
|
||||
|
||||
```bash
|
||||
npx playwright init-agents --loop=claude
|
||||
npx playwright init-agents --loop=vscode
|
||||
```
|
||||
|
||||
Three agents:
|
||||
|
||||
| Agent | Input | Output |
|
||||
|---|---|---|
|
||||
| `playwright-test-planner` | URL + seed test | Markdown plan (local scratch) |
|
||||
| `playwright-test-generator` | Plan + seed test | `tests/<feature>/<feature>.spec.ts` (validated live) |
|
||||
| `playwright-test-healer` | Failing spec + error | Patched spec, or `test.fixme()` with a reason |
|
||||
|
||||
Planner output is scratch — the `.spec.ts` is the source of truth. A
|
||||
`specs/` dir is `.gitignore`'d for planner use if you want it.
|
||||
|
||||
### CLI vs MCP
|
||||
|
||||
- **Subagents (MCP)**: use for the bounded plan → generate → heal loop.
|
||||
Token overhead is ~4× CLI but acceptable for structured sessions.
|
||||
- **`playwright-cli` directly**: use for quick locator checks, app
|
||||
exploration, ad-hoc debugging. Saves snapshots to `.playwright-cli/`
|
||||
instead of streaming into the LLM context window (~4× fewer tokens).
|
||||
|
||||
```bash
|
||||
playwright-cli open https://app.us.staging.signoz.cloud
|
||||
playwright-cli snapshot # element refs e1, e2, ...
|
||||
playwright-cli fill e5 "term"
|
||||
playwright-cli click e12
|
||||
playwright-cli screenshot
|
||||
playwright-cli console # errors
|
||||
playwright-cli network # requests
|
||||
playwright-cli state-save .playwright-cli/auth.json
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
For running and debugging test files, `yarn test:debug` / `yarn test:ui` /
|
||||
`yarn codegen` are faster than MCP for simple cases.
|
||||
65
docs/contributing/tests/integration.md
Normal file
65
docs/contributing/tests/integration.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Integration tests
|
||||
|
||||
Backend integration tests run against a containerized SigNoz stack brought
|
||||
up by pytest fixtures. Live under `tests/integration/`.
|
||||
|
||||
## Layout
|
||||
|
||||
```
|
||||
tests/integration/
|
||||
bootstrap/setup.py Stack lifecycle entrypoint (test_setup, test_teardown)
|
||||
tests/ Suites, one dir per feature area
|
||||
<suite>/ e.g. alerts, dashboard, querier, role, ...
|
||||
NN_<name>.py Numbered test files (collected in order)
|
||||
testdata/ JSON / JSONL / YAML data keyed by suite
|
||||
```
|
||||
|
||||
## Running
|
||||
|
||||
From `signoz/`:
|
||||
|
||||
```bash
|
||||
make py-test-setup # warm up stack (keeps containers under --reuse)
|
||||
make py-test # run all integration suites
|
||||
make py-test-teardown # free containers
|
||||
```
|
||||
|
||||
From `signoz/tests/`:
|
||||
|
||||
```bash
|
||||
uv sync # first time only
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse integration/bootstrap/setup.py::test_setup
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse integration/tests/<suite>/<file>.py
|
||||
```
|
||||
|
||||
Always pass `--reuse` — without it, pytest recreates containers on every
|
||||
invocation.
|
||||
|
||||
## Conventions
|
||||
|
||||
- **Filenames**: `NN_<snake_name>.py` (e.g. `01_register.py`). The numeric
|
||||
prefix orders execution within a suite.
|
||||
- **Suite directory**: one dir per feature area under `tests/`. Optionally
|
||||
`<suite>/conftest.py` for suite-local fixtures.
|
||||
- **Fixtures**: shared ones live in `tests/fixtures/` (registered via
|
||||
`tests/conftest.py`'s `pytest_plugins`). Reuse before adding new.
|
||||
- **Data**: test inputs / expected outputs live in `testdata/<suite>/`.
|
||||
Load via `fixtures.fs.get_testdata_file_path`.
|
||||
- **Style**: black + pylint via `make py-fmt` and `make py-lint` before
|
||||
committing (run from repo root).
|
||||
|
||||
## Adding a suite
|
||||
|
||||
1. Create `tests/integration/tests/<suite>/` with an empty `__init__.py`.
|
||||
2. Add `01_<entry>.py` with `test_<thing>(signoz: types.SigNoz)` functions.
|
||||
3. Import shared fixtures directly (e.g.
|
||||
`from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD`).
|
||||
4. If the suite needs bespoke setup, add `conftest.py` alongside the tests.
|
||||
5. Put any test data under `testdata/<suite>/`.
|
||||
|
||||
Running a single test while iterating:
|
||||
|
||||
```bash
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse \
|
||||
integration/tests/<suite>/<file>.py::test_<name>
|
||||
```
|
||||
19
tests/.dockerignore
Normal file
19
tests/.dockerignore
Normal file
@@ -0,0 +1,19 @@
|
||||
# Build context for tests/Dockerfile.seeder. Keep the context lean — the
|
||||
# seeder image only needs fixtures/ to be importable alongside seeder/,
|
||||
# plus pyproject.toml + uv.lock for dep install.
|
||||
|
||||
.venv
|
||||
.pytest_cache
|
||||
tmp
|
||||
**/__pycache__
|
||||
**/*.pyc
|
||||
|
||||
# e2e Playwright outputs and deps
|
||||
e2e/node_modules
|
||||
e2e/artifacts
|
||||
e2e/.auth
|
||||
e2e/.playwright-cli
|
||||
|
||||
# Integration-side outputs (if any stale dirs remain)
|
||||
integration/tmp
|
||||
integration/testdata
|
||||
35
tests/Dockerfile.seeder
Normal file
35
tests/Dockerfile.seeder
Normal file
@@ -0,0 +1,35 @@
|
||||
# HTTP seeder for Playwright e2e tests. Wraps the direct-ClickHouse-insert
|
||||
# helpers in tests/fixtures/{traces,logs,metrics}.py so a browser test can
|
||||
# seed telemetry with fine-grained control.
|
||||
#
|
||||
# Build context is tests/ (this file sits at its root) so `fixtures/` is
|
||||
# importable inside the image alongside `seeder/`.
|
||||
|
||||
FROM python:3.13-slim
|
||||
|
||||
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends gcc libpq-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install project dependencies from the pytest project's pyproject.toml +
|
||||
# uv.lock so the seeder container's Python env matches local dev exactly
|
||||
# (single source of truth for versions; no parallel requirements.txt).
|
||||
# --no-install-project skips building the signoz-tests project itself
|
||||
# (there is no buildable package here — pyproject is used purely for dep
|
||||
# management alongside pythonpath = ["."]).
|
||||
COPY pyproject.toml uv.lock /app/
|
||||
RUN uv sync --frozen --no-install-project --no-dev
|
||||
ENV PATH="/app/.venv/bin:$PATH"
|
||||
|
||||
# Ship the whole fixtures/ package so server.py can `from fixtures.traces
|
||||
# import ...` with the same module path the pytest side uses.
|
||||
COPY fixtures /app/fixtures
|
||||
COPY seeder /app/seeder
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
CMD ["uvicorn", "seeder.server:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||
46
tests/README.md
Normal file
46
tests/README.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# SigNoz Tests
|
||||
|
||||
Shared pytest project with two test trees that reuse the same fixture graph.
|
||||
|
||||
```
|
||||
tests/
|
||||
pyproject.toml Shared uv/pytest project (rootdir for both trees)
|
||||
conftest.py Registers pytest_plugins from fixtures/
|
||||
fixtures/ Shared Python fixtures: container bring-up, auth,
|
||||
telemetry inserts, API-seeding helpers
|
||||
integration/
|
||||
bootstrap/setup.py Brings backend up (stack lifecycle for iterative dev)
|
||||
tests/ Backend integration tests (pytest)
|
||||
testdata/ Integration-specific JSON/YAML
|
||||
e2e/
|
||||
bootstrap/setup.py Brings backend up + seeder; writes .env.local
|
||||
bootstrap/run.py One-command entrypoint: subprocesses `yarn test`
|
||||
tests/ Playwright specs (TS)
|
||||
playwright.config.ts Loads .env (user) + .env.local (generated) via dotenv
|
||||
seeder/ HTTP service providing per-test telemetry endpoints
|
||||
```
|
||||
|
||||
## Fixture ownership
|
||||
|
||||
- **Shared** (`tests/fixtures/`): anything that could be useful across trees —
|
||||
container bring-up, auth, direct telemetry inserts, API helpers.
|
||||
- **No global pre-seed**: e2e specs seed their own data. Telemetry goes
|
||||
through the seeder's `/telemetry/{traces,logs,metrics}` endpoints;
|
||||
dashboards / alert rules / org config go through the SigNoz REST API
|
||||
directly from the spec.
|
||||
|
||||
## Common commands
|
||||
|
||||
```bash
|
||||
# From signoz/:
|
||||
make py-test # Run all integration tests
|
||||
make py-test-setup # Warm up backend (for iterative dev)
|
||||
make py-test-teardown # Free containers
|
||||
|
||||
# From signoz/tests/:
|
||||
uv sync # First-time Python deps
|
||||
uv run pytest integration/tests/ # Integration suite
|
||||
uv run pytest --with-web e2e/bootstrap/run.py::test_e2e # Full e2e run
|
||||
```
|
||||
|
||||
See `e2e/README.md` for the e2e-specific workflow.
|
||||
@@ -17,12 +17,14 @@ pytest_plugins = [
|
||||
"fixtures.traces",
|
||||
"fixtures.metrics",
|
||||
"fixtures.meter",
|
||||
"fixtures.driver",
|
||||
"fixtures.browser",
|
||||
"fixtures.keycloak",
|
||||
"fixtures.idp",
|
||||
"fixtures.idputils",
|
||||
"fixtures.notification_channel",
|
||||
"fixtures.alerts",
|
||||
"fixtures.cloudintegrations",
|
||||
"fixtures.dashboards",
|
||||
"fixtures.seeder",
|
||||
]
|
||||
|
||||
|
||||
59
tests/e2e/.claude/agents/playwright-test-generator.md
Normal file
59
tests/e2e/.claude/agents/playwright-test-generator.md
Normal file
@@ -0,0 +1,59 @@
|
||||
---
|
||||
name: playwright-test-generator
|
||||
description: Use this agent when you need to create automated browser tests using Playwright. Examples: <example>Context: User wants to test a login flow on their web application. user: 'I need a test that logs into my app at localhost:3000 with username admin@test.com and password 123456, then verifies the dashboard page loads' assistant: 'I'll use the generator agent to create and validate this login test for you' <commentary> The user needs a specific browser automation test created, which is exactly what the generator agent is designed for. </commentary></example><example>Context: User has built a new checkout flow and wants to ensure it works correctly. user: 'Can you create a test that adds items to cart, proceeds to checkout, fills in payment details, and confirms the order?' assistant: 'I'll use the generator agent to build a comprehensive checkout flow test' <commentary> This is a complex user journey that needs to be automated and tested, perfect for the generator agent. </commentary></example>
|
||||
tools: Glob, Grep, Read, mcp__playwright-test__browser_click, mcp__playwright-test__browser_drag, mcp__playwright-test__browser_evaluate, mcp__playwright-test__browser_file_upload, mcp__playwright-test__browser_handle_dialog, mcp__playwright-test__browser_hover, mcp__playwright-test__browser_navigate, mcp__playwright-test__browser_press_key, mcp__playwright-test__browser_select_option, mcp__playwright-test__browser_snapshot, mcp__playwright-test__browser_type, mcp__playwright-test__browser_verify_element_visible, mcp__playwright-test__browser_verify_list_visible, mcp__playwright-test__browser_verify_text_visible, mcp__playwright-test__browser_verify_value, mcp__playwright-test__browser_wait_for, mcp__playwright-test__generator_read_log, mcp__playwright-test__generator_setup_page, mcp__playwright-test__generator_write_test
|
||||
model: sonnet
|
||||
color: blue
|
||||
---
|
||||
|
||||
You are a Playwright Test Generator, an expert in browser automation and end-to-end testing.
|
||||
Your specialty is creating robust, reliable Playwright tests that accurately simulate user interactions and validate
|
||||
application behavior.
|
||||
|
||||
# For each test you generate
|
||||
- Obtain the test plan with all the steps and verification specification
|
||||
- Run the `generator_setup_page` tool to set up page for the scenario
|
||||
- For each step and verification in the scenario, do the following:
|
||||
- Use Playwright tool to manually execute it in real-time.
|
||||
- Use the step description as the intent for each Playwright tool call.
|
||||
- Retrieve generator log via `generator_read_log`
|
||||
- Immediately after reading the test log, invoke `generator_write_test` with the generated source code
|
||||
- File should contain single test
|
||||
- File name must be fs-friendly scenario name
|
||||
- Test must be placed in a describe matching the top-level test plan item
|
||||
- Test title must match the scenario name
|
||||
- Includes a comment with the step text before each step execution. Do not duplicate comments if step requires
|
||||
multiple actions.
|
||||
- Always use best practices from the log when generating tests.
|
||||
|
||||
<example-generation>
|
||||
For following plan:
|
||||
|
||||
```markdown file=specs/plan.md
|
||||
### 1. Adding New Todos
|
||||
**Seed:** `tests/seed.spec.ts`
|
||||
|
||||
#### 1.1 Add Valid Todo
|
||||
**Steps:**
|
||||
1. Click in the "What needs to be done?" input field
|
||||
|
||||
#### 1.2 Add Multiple Todos
|
||||
...
|
||||
```
|
||||
|
||||
Following file is generated:
|
||||
|
||||
```ts file=add-valid-todo.spec.ts
|
||||
// spec: specs/plan.md
|
||||
// seed: tests/seed.spec.ts
|
||||
|
||||
test.describe('Adding New Todos', () => {
|
||||
test('Add Valid Todo', async { page } => {
|
||||
// 1. Click in the "What needs to be done?" input field
|
||||
await page.click(...);
|
||||
|
||||
...
|
||||
});
|
||||
});
|
||||
```
|
||||
</example-generation>
|
||||
45
tests/e2e/.claude/agents/playwright-test-healer.md
Normal file
45
tests/e2e/.claude/agents/playwright-test-healer.md
Normal file
@@ -0,0 +1,45 @@
|
||||
---
|
||||
name: playwright-test-healer
|
||||
description: Use this agent when you need to debug and fix failing Playwright tests. Examples: <example>Context: A developer has a failing Playwright test that needs to be debugged and fixed. user: 'The login test is failing, can you fix it?' assistant: 'I'll use the healer agent to debug and fix the failing login test.' <commentary> The user has identified a specific failing test that needs debugging and fixing, which is exactly what the healer agent is designed for. </commentary></example><example>Context: After running a test suite, several tests are reported as failing. user: 'Test user-registration.spec.ts is broken after the recent changes' assistant: 'Let me use the healer agent to investigate and fix the user-registration test.' <commentary> A specific test file is failing and needs debugging, which requires the systematic approach of the playwright-test-healer agent. </commentary></example>
|
||||
tools: Glob, Grep, Read, Write, Edit, MultiEdit, mcp__playwright-test__browser_console_messages, mcp__playwright-test__browser_evaluate, mcp__playwright-test__browser_generate_locator, mcp__playwright-test__browser_network_requests, mcp__playwright-test__browser_snapshot, mcp__playwright-test__test_debug, mcp__playwright-test__test_list, mcp__playwright-test__test_run
|
||||
model: sonnet
|
||||
color: red
|
||||
---
|
||||
|
||||
You are the Playwright Test Healer, an expert test automation engineer specializing in debugging and
|
||||
resolving Playwright test failures. Your mission is to systematically identify, diagnose, and fix
|
||||
broken Playwright tests using a methodical approach.
|
||||
|
||||
Your workflow:
|
||||
1. **Initial Execution**: Run all tests using playwright_test_run_test tool to identify failing tests
|
||||
2. **Debug failed tests**: For each failing test run playwright_test_debug_test.
|
||||
3. **Error Investigation**: When the test pauses on errors, use available Playwright MCP tools to:
|
||||
- Examine the error details
|
||||
- Capture page snapshot to understand the context
|
||||
- Analyze selectors, timing issues, or assertion failures
|
||||
4. **Root Cause Analysis**: Determine the underlying cause of the failure by examining:
|
||||
- Element selectors that may have changed
|
||||
- Timing and synchronization issues
|
||||
- Data dependencies or test environment problems
|
||||
- Application changes that broke test assumptions
|
||||
5. **Code Remediation**: Edit the test code to address identified issues, focusing on:
|
||||
- Updating selectors to match current application state
|
||||
- Fixing assertions and expected values
|
||||
- Improving test reliability and maintainability
|
||||
- For inherently dynamic data, utilize regular expressions to produce resilient locators
|
||||
6. **Verification**: Restart the test after each fix to validate the changes
|
||||
7. **Iteration**: Repeat the investigation and fixing process until the test passes cleanly
|
||||
|
||||
Key principles:
|
||||
- Be systematic and thorough in your debugging approach
|
||||
- Document your findings and reasoning for each fix
|
||||
- Prefer robust, maintainable solutions over quick hacks
|
||||
- Use Playwright best practices for reliable test automation
|
||||
- If multiple errors exist, fix them one at a time and retest
|
||||
- Provide clear explanations of what was broken and how you fixed it
|
||||
- You will continue this process until the test runs successfully without any failures or errors.
|
||||
- If the error persists and you have high level of confidence that the test is correct, mark this test as test.fixme()
|
||||
so that it is skipped during the execution. Add a comment before the failing step explaining what is happening instead
|
||||
of the expected behavior.
|
||||
- Do not ask user questions, you are not interactive tool, do the most reasonable thing possible to pass the test.
|
||||
- Never wait for networkidle or use other discouraged or deprecated apis
|
||||
99
tests/e2e/.claude/agents/playwright-test-planner.md
Normal file
99
tests/e2e/.claude/agents/playwright-test-planner.md
Normal file
@@ -0,0 +1,99 @@
|
||||
---
|
||||
name: playwright-test-planner
|
||||
description: Use this agent when you need to create comprehensive test plan for a web application or website. Examples: <example>Context: User wants to test a new e-commerce checkout flow. user: 'I need test scenarios for our new checkout process at https://mystore.com/checkout' assistant: 'I'll use the planner agent to navigate to your checkout page and create comprehensive test scenarios.' <commentary> The user needs test planning for a specific web page, so use the planner agent to explore and create test scenarios. </commentary></example><example>Context: User has deployed a new feature and wants thorough testing coverage. user: 'Can you help me test our new user dashboard at https://app.example.com/dashboard?' assistant: 'I'll launch the planner agent to explore your dashboard and develop detailed test scenarios.' <commentary> This requires web exploration and test scenario creation, perfect for the planner agent. </commentary></example>
|
||||
tools: Glob, Grep, Read, Write, WebFetch, mcp__playwright-test__browser_click, mcp__playwright-test__browser_close, mcp__playwright-test__browser_console_messages, mcp__playwright-test__browser_drag, mcp__playwright-test__browser_evaluate, mcp__playwright-test__browser_file_upload, mcp__playwright-test__browser_handle_dialog, mcp__playwright-test__browser_hover, mcp__playwright-test__browser_navigate, mcp__playwright-test__browser_navigate_back, mcp__playwright-test__browser_network_requests, mcp__playwright-test__browser_press_key, mcp__playwright-test__browser_select_option, mcp__playwright-test__browser_snapshot, mcp__playwright-test__browser_take_screenshot, mcp__playwright-test__browser_type, mcp__playwright-test__browser_wait_for, mcp__playwright-test__planner_setup_page
|
||||
model: sonnet
|
||||
color: green
|
||||
---
|
||||
|
||||
You are an expert web test planner with extensive experience in quality assurance, user experience testing, and test
|
||||
scenario design. Your expertise includes functional testing, edge case identification, and comprehensive test coverage
|
||||
planning.
|
||||
|
||||
You will:
|
||||
|
||||
1. **Inspect Source Component Structure**
|
||||
- For the feature under test, fetch the relevant source files from `https://github.com/SigNoz/signoz/` to understand the component hierarchy, props, state, and any conditional rendering paths
|
||||
- Use `WebFetch` to retrieve raw file contents from GitHub (e.g. `https://raw.githubusercontent.com/SigNoz/signoz/main/frontend/src/pages/<Feature>/index.tsx`)
|
||||
- Browse the directory listing at `https://github.com/SigNoz/signoz/tree/main/frontend/src/` to discover the correct paths if uncertain
|
||||
- Identify all interactive sub-components, loading/error states, permission guards, and feature flags exposed in the source — these reveal test scenarios not always visible from the UI alone
|
||||
|
||||
2. **Navigate and Explore**
|
||||
- Invoke the `planner_setup_page` tool once to set up page before using any other tools
|
||||
- Explore the browser snapshot
|
||||
- Do not take screenshots unless absolutely necessary
|
||||
- Use browser_* tools to navigate and discover interface
|
||||
- Thoroughly explore the interface, identifying all interactive elements, forms, navigation paths, and functionality
|
||||
|
||||
3. **Analyze User Flows**
|
||||
- Map out the primary user journeys and identify critical paths through the application
|
||||
- Consider different user types and their typical behaviors
|
||||
|
||||
4. **Design Comprehensive Scenarios**
|
||||
|
||||
Create detailed test scenarios that cover:
|
||||
- Happy path scenarios (normal user behavior)
|
||||
- Edge cases and boundary conditions
|
||||
- Error handling and validation
|
||||
|
||||
5. **Structure Test Plans**
|
||||
|
||||
Each scenario must include:
|
||||
- Clear, descriptive title
|
||||
- Detailed step-by-step instructions
|
||||
- Expected outcomes where appropriate
|
||||
- Assumptions about starting state (always assume blank/fresh state)
|
||||
- Success criteria and failure conditions
|
||||
|
||||
6. **Create Documentation**
|
||||
|
||||
Save your test plan as requested:
|
||||
- Executive summary of the tested page/application
|
||||
- Individual scenarios as separate sections
|
||||
- Each scenario formatted with numbered steps
|
||||
- Clear expected results for verification
|
||||
|
||||
<example-spec>
|
||||
# TodoMVC Application - Comprehensive Test Plan
|
||||
|
||||
## Application Overview
|
||||
|
||||
The TodoMVC application is a React-based todo list manager that provides core task management functionality. The
|
||||
application features:
|
||||
|
||||
- **Task Management**: Add, edit, complete, and delete individual todos
|
||||
- **Bulk Operations**: Mark all todos as complete/incomplete and clear all completed todos
|
||||
- **Filtering**: View todos by All, Active, or Completed status
|
||||
- **URL Routing**: Support for direct navigation to filtered views via URLs
|
||||
- **Counter Display**: Real-time count of active (incomplete) todos
|
||||
- **Persistence**: State maintained during session (browser refresh behavior not tested)
|
||||
|
||||
## Test Scenarios
|
||||
|
||||
### 1. Adding New Todos
|
||||
|
||||
**Seed:** `tests/seed.spec.ts`
|
||||
|
||||
#### 1.1 Add Valid Todo
|
||||
**Steps:**
|
||||
1. Click in the "What needs to be done?" input field
|
||||
2. Type "Buy groceries"
|
||||
3. Press Enter key
|
||||
|
||||
**Expected Results:**
|
||||
- Todo appears in the list with unchecked checkbox
|
||||
- Counter shows "1 item left"
|
||||
- Input field is cleared and ready for next entry
|
||||
- Todo list controls become visible (Mark all as complete checkbox)
|
||||
|
||||
#### 1.2
|
||||
...
|
||||
</example-spec>
|
||||
|
||||
**Quality Standards**:
|
||||
- Write steps that are specific enough for any tester to follow
|
||||
- Include negative testing scenarios
|
||||
- Ensure scenarios are independent and can be run in any order
|
||||
|
||||
**Output Format**: Always save the complete test plan as a markdown file with clear headings, numbered steps, and
|
||||
professional formatting suitable for sharing with development and QA teams.
|
||||
278
tests/e2e/.claude/skills/playwright-cli/SKILL.md
Normal file
278
tests/e2e/.claude/skills/playwright-cli/SKILL.md
Normal file
@@ -0,0 +1,278 @@
|
||||
---
|
||||
name: playwright-cli
|
||||
description: Automates browser interactions for web testing, form filling, screenshots, and data extraction. Use when the user needs to navigate websites, interact with web pages, fill forms, take screenshots, test web applications, or extract information from web pages.
|
||||
allowed-tools: Bash(playwright-cli:*)
|
||||
---
|
||||
|
||||
# Browser Automation with playwright-cli
|
||||
|
||||
## Quick start
|
||||
|
||||
```bash
|
||||
# open new browser
|
||||
playwright-cli open
|
||||
# navigate to a page
|
||||
playwright-cli goto https://playwright.dev
|
||||
# interact with the page using refs from the snapshot
|
||||
playwright-cli click e15
|
||||
playwright-cli type "page.click"
|
||||
playwright-cli press Enter
|
||||
# take a screenshot (rarely used, as snapshot is more common)
|
||||
playwright-cli screenshot
|
||||
# close the browser
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
### Core
|
||||
|
||||
```bash
|
||||
playwright-cli open
|
||||
# open and navigate right away
|
||||
playwright-cli open https://example.com/
|
||||
playwright-cli goto https://playwright.dev
|
||||
playwright-cli type "search query"
|
||||
playwright-cli click e3
|
||||
playwright-cli dblclick e7
|
||||
playwright-cli fill e5 "user@example.com"
|
||||
playwright-cli drag e2 e8
|
||||
playwright-cli hover e4
|
||||
playwright-cli select e9 "option-value"
|
||||
playwright-cli upload ./document.pdf
|
||||
playwright-cli check e12
|
||||
playwright-cli uncheck e12
|
||||
playwright-cli snapshot
|
||||
playwright-cli snapshot --filename=after-click.yaml
|
||||
playwright-cli eval "document.title"
|
||||
playwright-cli eval "el => el.textContent" e5
|
||||
playwright-cli dialog-accept
|
||||
playwright-cli dialog-accept "confirmation text"
|
||||
playwright-cli dialog-dismiss
|
||||
playwright-cli resize 1920 1080
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
### Navigation
|
||||
|
||||
```bash
|
||||
playwright-cli go-back
|
||||
playwright-cli go-forward
|
||||
playwright-cli reload
|
||||
```
|
||||
|
||||
### Keyboard
|
||||
|
||||
```bash
|
||||
playwright-cli press Enter
|
||||
playwright-cli press ArrowDown
|
||||
playwright-cli keydown Shift
|
||||
playwright-cli keyup Shift
|
||||
```
|
||||
|
||||
### Mouse
|
||||
|
||||
```bash
|
||||
playwright-cli mousemove 150 300
|
||||
playwright-cli mousedown
|
||||
playwright-cli mousedown right
|
||||
playwright-cli mouseup
|
||||
playwright-cli mouseup right
|
||||
playwright-cli mousewheel 0 100
|
||||
```
|
||||
|
||||
### Save as
|
||||
|
||||
```bash
|
||||
playwright-cli screenshot
|
||||
playwright-cli screenshot e5
|
||||
playwright-cli screenshot --filename=page.png
|
||||
playwright-cli pdf --filename=page.pdf
|
||||
```
|
||||
|
||||
### Tabs
|
||||
|
||||
```bash
|
||||
playwright-cli tab-list
|
||||
playwright-cli tab-new
|
||||
playwright-cli tab-new https://example.com/page
|
||||
playwright-cli tab-close
|
||||
playwright-cli tab-close 2
|
||||
playwright-cli tab-select 0
|
||||
```
|
||||
|
||||
### Storage
|
||||
|
||||
```bash
|
||||
playwright-cli state-save
|
||||
playwright-cli state-save auth.json
|
||||
playwright-cli state-load auth.json
|
||||
|
||||
# Cookies
|
||||
playwright-cli cookie-list
|
||||
playwright-cli cookie-list --domain=example.com
|
||||
playwright-cli cookie-get session_id
|
||||
playwright-cli cookie-set session_id abc123
|
||||
playwright-cli cookie-set session_id abc123 --domain=example.com --httpOnly --secure
|
||||
playwright-cli cookie-delete session_id
|
||||
playwright-cli cookie-clear
|
||||
|
||||
# LocalStorage
|
||||
playwright-cli localstorage-list
|
||||
playwright-cli localstorage-get theme
|
||||
playwright-cli localstorage-set theme dark
|
||||
playwright-cli localstorage-delete theme
|
||||
playwright-cli localstorage-clear
|
||||
|
||||
# SessionStorage
|
||||
playwright-cli sessionstorage-list
|
||||
playwright-cli sessionstorage-get step
|
||||
playwright-cli sessionstorage-set step 3
|
||||
playwright-cli sessionstorage-delete step
|
||||
playwright-cli sessionstorage-clear
|
||||
```
|
||||
|
||||
### Network
|
||||
|
||||
```bash
|
||||
playwright-cli route "**/*.jpg" --status=404
|
||||
playwright-cli route "https://api.example.com/**" --body='{"mock": true}'
|
||||
playwright-cli route-list
|
||||
playwright-cli unroute "**/*.jpg"
|
||||
playwright-cli unroute
|
||||
```
|
||||
|
||||
### DevTools
|
||||
|
||||
```bash
|
||||
playwright-cli console
|
||||
playwright-cli console warning
|
||||
playwright-cli network
|
||||
playwright-cli run-code "async page => await page.context().grantPermissions(['geolocation'])"
|
||||
playwright-cli tracing-start
|
||||
playwright-cli tracing-stop
|
||||
playwright-cli video-start
|
||||
playwright-cli video-stop video.webm
|
||||
```
|
||||
|
||||
## Open parameters
|
||||
```bash
|
||||
# Use specific browser when creating session
|
||||
playwright-cli open --browser=chrome
|
||||
playwright-cli open --browser=firefox
|
||||
playwright-cli open --browser=webkit
|
||||
playwright-cli open --browser=msedge
|
||||
# Connect to browser via extension
|
||||
playwright-cli open --extension
|
||||
|
||||
# Use persistent profile (by default profile is in-memory)
|
||||
playwright-cli open --persistent
|
||||
# Use persistent profile with custom directory
|
||||
playwright-cli open --profile=/path/to/profile
|
||||
|
||||
# Start with config file
|
||||
playwright-cli open --config=my-config.json
|
||||
|
||||
# Close the browser
|
||||
playwright-cli close
|
||||
# Delete user data for the default session
|
||||
playwright-cli delete-data
|
||||
```
|
||||
|
||||
## Snapshots
|
||||
|
||||
After each command, playwright-cli provides a snapshot of the current browser state.
|
||||
|
||||
```bash
|
||||
> playwright-cli goto https://example.com
|
||||
### Page
|
||||
- Page URL: https://example.com/
|
||||
- Page Title: Example Domain
|
||||
### Snapshot
|
||||
[Snapshot](.playwright-cli/page-2026-02-14T19-22-42-679Z.yml)
|
||||
```
|
||||
|
||||
You can also take a snapshot on demand using `playwright-cli snapshot` command.
|
||||
|
||||
If `--filename` is not provided, a new snapshot file is created with a timestamp. Default to automatic file naming; use `--filename=` when the artifact is part of the workflow result.
|
||||
|
||||
## Browser Sessions
|
||||
|
||||
```bash
|
||||
# create new browser session named "mysession" with persistent profile
|
||||
playwright-cli -s=mysession open example.com --persistent
|
||||
# same with manually specified profile directory (use when requested explicitly)
|
||||
playwright-cli -s=mysession open example.com --profile=/path/to/profile
|
||||
playwright-cli -s=mysession click e6
|
||||
playwright-cli -s=mysession close # stop a named browser
|
||||
playwright-cli -s=mysession delete-data # delete user data for persistent session
|
||||
|
||||
playwright-cli list
|
||||
# Close all browsers
|
||||
playwright-cli close-all
|
||||
# Forcefully kill all browser processes
|
||||
playwright-cli kill-all
|
||||
```
|
||||
|
||||
## Local installation
|
||||
|
||||
In some cases the user might want to install playwright-cli locally. If running the globally available `playwright-cli` binary fails, use `npx playwright-cli` to run the commands. For example:
|
||||
|
||||
```bash
|
||||
npx playwright-cli open https://example.com
|
||||
npx playwright-cli click e1
|
||||
```
|
||||
|
||||
## Example: Form submission
|
||||
|
||||
```bash
|
||||
playwright-cli open https://example.com/form
|
||||
playwright-cli snapshot
|
||||
|
||||
playwright-cli fill e1 "user@example.com"
|
||||
playwright-cli fill e2 "password123"
|
||||
playwright-cli click e3
|
||||
playwright-cli snapshot
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
## Example: Multi-tab workflow
|
||||
|
||||
```bash
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli tab-new https://example.com/other
|
||||
playwright-cli tab-list
|
||||
playwright-cli tab-select 0
|
||||
playwright-cli snapshot
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
## Example: Debugging with DevTools
|
||||
|
||||
```bash
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli click e4
|
||||
playwright-cli fill e7 "test"
|
||||
playwright-cli console
|
||||
playwright-cli network
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
```bash
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli tracing-start
|
||||
playwright-cli click e4
|
||||
playwright-cli fill e7 "test"
|
||||
playwright-cli tracing-stop
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
## Specific tasks
|
||||
|
||||
* **Request mocking** [references/request-mocking.md](references/request-mocking.md)
|
||||
* **Running Playwright code** [references/running-code.md](references/running-code.md)
|
||||
* **Browser session management** [references/session-management.md](references/session-management.md)
|
||||
* **Storage state (cookies, localStorage)** [references/storage-state.md](references/storage-state.md)
|
||||
* **Test generation** [references/test-generation.md](references/test-generation.md)
|
||||
* **Tracing** [references/tracing.md](references/tracing.md)
|
||||
* **Video recording** [references/video-recording.md](references/video-recording.md)
|
||||
@@ -0,0 +1,87 @@
|
||||
# Request Mocking
|
||||
|
||||
Intercept, mock, modify, and block network requests.
|
||||
|
||||
## CLI Route Commands
|
||||
|
||||
```bash
|
||||
# Mock with custom status
|
||||
playwright-cli route "**/*.jpg" --status=404
|
||||
|
||||
# Mock with JSON body
|
||||
playwright-cli route "**/api/users" --body='[{"id":1,"name":"Alice"}]' --content-type=application/json
|
||||
|
||||
# Mock with custom headers
|
||||
playwright-cli route "**/api/data" --body='{"ok":true}' --header="X-Custom: value"
|
||||
|
||||
# Remove headers from requests
|
||||
playwright-cli route "**/*" --remove-header=cookie,authorization
|
||||
|
||||
# List active routes
|
||||
playwright-cli route-list
|
||||
|
||||
# Remove a route or all routes
|
||||
playwright-cli unroute "**/*.jpg"
|
||||
playwright-cli unroute
|
||||
```
|
||||
|
||||
## URL Patterns
|
||||
|
||||
```
|
||||
**/api/users - Exact path match
|
||||
**/api/*/details - Wildcard in path
|
||||
**/*.{png,jpg,jpeg} - Match file extensions
|
||||
**/search?q=* - Match query parameters
|
||||
```
|
||||
|
||||
## Advanced Mocking with run-code
|
||||
|
||||
For conditional responses, request body inspection, response modification, or delays:
|
||||
|
||||
### Conditional Response Based on Request
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.route('**/api/login', route => {
|
||||
const body = route.request().postDataJSON();
|
||||
if (body.username === 'admin') {
|
||||
route.fulfill({ body: JSON.stringify({ token: 'mock-token' }) });
|
||||
} else {
|
||||
route.fulfill({ status: 401, body: JSON.stringify({ error: 'Invalid' }) });
|
||||
}
|
||||
});
|
||||
}"
|
||||
```
|
||||
|
||||
### Modify Real Response
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.route('**/api/user', async route => {
|
||||
const response = await route.fetch();
|
||||
const json = await response.json();
|
||||
json.isPremium = true;
|
||||
await route.fulfill({ response, json });
|
||||
});
|
||||
}"
|
||||
```
|
||||
|
||||
### Simulate Network Failures
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.route('**/api/offline', route => route.abort('internetdisconnected'));
|
||||
}"
|
||||
# Options: connectionrefused, timedout, connectionreset, internetdisconnected
|
||||
```
|
||||
|
||||
### Delayed Response
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.route('**/api/slow', async route => {
|
||||
await new Promise(r => setTimeout(r, 3000));
|
||||
route.fulfill({ body: JSON.stringify({ data: 'loaded' }) });
|
||||
});
|
||||
}"
|
||||
```
|
||||
@@ -0,0 +1,232 @@
|
||||
# Running Custom Playwright Code
|
||||
|
||||
Use `run-code` to execute arbitrary Playwright code for advanced scenarios not covered by CLI commands.
|
||||
|
||||
## Syntax
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
// Your Playwright code here
|
||||
// Access page.context() for browser context operations
|
||||
}"
|
||||
```
|
||||
|
||||
## Geolocation
|
||||
|
||||
```bash
|
||||
# Grant geolocation permission and set location
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().grantPermissions(['geolocation']);
|
||||
await page.context().setGeolocation({ latitude: 37.7749, longitude: -122.4194 });
|
||||
}"
|
||||
|
||||
# Set location to London
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().grantPermissions(['geolocation']);
|
||||
await page.context().setGeolocation({ latitude: 51.5074, longitude: -0.1278 });
|
||||
}"
|
||||
|
||||
# Clear geolocation override
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().clearPermissions();
|
||||
}"
|
||||
```
|
||||
|
||||
## Permissions
|
||||
|
||||
```bash
|
||||
# Grant multiple permissions
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().grantPermissions([
|
||||
'geolocation',
|
||||
'notifications',
|
||||
'camera',
|
||||
'microphone'
|
||||
]);
|
||||
}"
|
||||
|
||||
# Grant permissions for specific origin
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().grantPermissions(['clipboard-read'], {
|
||||
origin: 'https://example.com'
|
||||
});
|
||||
}"
|
||||
```
|
||||
|
||||
## Media Emulation
|
||||
|
||||
```bash
|
||||
# Emulate dark color scheme
|
||||
playwright-cli run-code "async page => {
|
||||
await page.emulateMedia({ colorScheme: 'dark' });
|
||||
}"
|
||||
|
||||
# Emulate light color scheme
|
||||
playwright-cli run-code "async page => {
|
||||
await page.emulateMedia({ colorScheme: 'light' });
|
||||
}"
|
||||
|
||||
# Emulate reduced motion
|
||||
playwright-cli run-code "async page => {
|
||||
await page.emulateMedia({ reducedMotion: 'reduce' });
|
||||
}"
|
||||
|
||||
# Emulate print media
|
||||
playwright-cli run-code "async page => {
|
||||
await page.emulateMedia({ media: 'print' });
|
||||
}"
|
||||
```
|
||||
|
||||
## Wait Strategies
|
||||
|
||||
```bash
|
||||
# Wait for network idle
|
||||
playwright-cli run-code "async page => {
|
||||
await page.waitForLoadState('networkidle');
|
||||
}"
|
||||
|
||||
# Wait for specific element
|
||||
playwright-cli run-code "async page => {
|
||||
await page.waitForSelector('.loading', { state: 'hidden' });
|
||||
}"
|
||||
|
||||
# Wait for function to return true
|
||||
playwright-cli run-code "async page => {
|
||||
await page.waitForFunction(() => window.appReady === true);
|
||||
}"
|
||||
|
||||
# Wait with timeout
|
||||
playwright-cli run-code "async page => {
|
||||
await page.waitForSelector('.result', { timeout: 10000 });
|
||||
}"
|
||||
```
|
||||
|
||||
## Frames and Iframes
|
||||
|
||||
```bash
|
||||
# Work with iframe
|
||||
playwright-cli run-code "async page => {
|
||||
const frame = page.locator('iframe#my-iframe').contentFrame();
|
||||
await frame.locator('button').click();
|
||||
}"
|
||||
|
||||
# Get all frames
|
||||
playwright-cli run-code "async page => {
|
||||
const frames = page.frames();
|
||||
return frames.map(f => f.url());
|
||||
}"
|
||||
```
|
||||
|
||||
## File Downloads
|
||||
|
||||
```bash
|
||||
# Handle file download
|
||||
playwright-cli run-code "async page => {
|
||||
const [download] = await Promise.all([
|
||||
page.waitForEvent('download'),
|
||||
page.click('a.download-link')
|
||||
]);
|
||||
await download.saveAs('./downloaded-file.pdf');
|
||||
return download.suggestedFilename();
|
||||
}"
|
||||
```
|
||||
|
||||
## Clipboard
|
||||
|
||||
```bash
|
||||
# Read clipboard (requires permission)
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().grantPermissions(['clipboard-read']);
|
||||
return await page.evaluate(() => navigator.clipboard.readText());
|
||||
}"
|
||||
|
||||
# Write to clipboard
|
||||
playwright-cli run-code "async page => {
|
||||
await page.evaluate(text => navigator.clipboard.writeText(text), 'Hello clipboard!');
|
||||
}"
|
||||
```
|
||||
|
||||
## Page Information
|
||||
|
||||
```bash
|
||||
# Get page title
|
||||
playwright-cli run-code "async page => {
|
||||
return await page.title();
|
||||
}"
|
||||
|
||||
# Get current URL
|
||||
playwright-cli run-code "async page => {
|
||||
return page.url();
|
||||
}"
|
||||
|
||||
# Get page content
|
||||
playwright-cli run-code "async page => {
|
||||
return await page.content();
|
||||
}"
|
||||
|
||||
# Get viewport size
|
||||
playwright-cli run-code "async page => {
|
||||
return page.viewportSize();
|
||||
}"
|
||||
```
|
||||
|
||||
## JavaScript Execution
|
||||
|
||||
```bash
|
||||
# Execute JavaScript and return result
|
||||
playwright-cli run-code "async page => {
|
||||
return await page.evaluate(() => {
|
||||
return {
|
||||
userAgent: navigator.userAgent,
|
||||
language: navigator.language,
|
||||
cookiesEnabled: navigator.cookieEnabled
|
||||
};
|
||||
});
|
||||
}"
|
||||
|
||||
# Pass arguments to evaluate
|
||||
playwright-cli run-code "async page => {
|
||||
const multiplier = 5;
|
||||
return await page.evaluate(m => document.querySelectorAll('li').length * m, multiplier);
|
||||
}"
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
```bash
|
||||
# Try-catch in run-code
|
||||
playwright-cli run-code "async page => {
|
||||
try {
|
||||
await page.click('.maybe-missing', { timeout: 1000 });
|
||||
return 'clicked';
|
||||
} catch (e) {
|
||||
return 'element not found';
|
||||
}
|
||||
}"
|
||||
```
|
||||
|
||||
## Complex Workflows
|
||||
|
||||
```bash
|
||||
# Login and save state
|
||||
playwright-cli run-code "async page => {
|
||||
await page.goto('https://example.com/login');
|
||||
await page.fill('input[name=email]', 'user@example.com');
|
||||
await page.fill('input[name=password]', 'secret');
|
||||
await page.click('button[type=submit]');
|
||||
await page.waitForURL('**/dashboard');
|
||||
await page.context().storageState({ path: 'auth.json' });
|
||||
return 'Login successful';
|
||||
}"
|
||||
|
||||
# Scrape data from multiple pages
|
||||
playwright-cli run-code "async page => {
|
||||
const results = [];
|
||||
for (let i = 1; i <= 3; i++) {
|
||||
await page.goto(\`https://example.com/page/\${i}\`);
|
||||
const items = await page.locator('.item').allTextContents();
|
||||
results.push(...items);
|
||||
}
|
||||
return results;
|
||||
}"
|
||||
```
|
||||
@@ -0,0 +1,169 @@
|
||||
# Browser Session Management
|
||||
|
||||
Run multiple isolated browser sessions concurrently with state persistence.
|
||||
|
||||
## Named Browser Sessions
|
||||
|
||||
Use the `-s` flag to isolate browser contexts:
|
||||
|
||||
```bash
|
||||
# Browser 1: Authentication flow
|
||||
playwright-cli -s=auth open https://app.example.com/login
|
||||
|
||||
# Browser 2: Public browsing (separate cookies, storage)
|
||||
playwright-cli -s=public open https://example.com
|
||||
|
||||
# Commands are isolated by browser session
|
||||
playwright-cli -s=auth fill e1 "user@example.com"
|
||||
playwright-cli -s=public snapshot
|
||||
```
|
||||
|
||||
## Browser Session Isolation Properties
|
||||
|
||||
Each browser session has independent:
|
||||
- Cookies
|
||||
- LocalStorage / SessionStorage
|
||||
- IndexedDB
|
||||
- Cache
|
||||
- Browsing history
|
||||
- Open tabs
|
||||
|
||||
## Browser Session Commands
|
||||
|
||||
```bash
|
||||
# List all browser sessions
|
||||
playwright-cli list
|
||||
|
||||
# Stop a browser session (close the browser)
|
||||
playwright-cli close # stop the default browser
|
||||
playwright-cli -s=mysession close # stop a named browser
|
||||
|
||||
# Stop all browser sessions
|
||||
playwright-cli close-all
|
||||
|
||||
# Forcefully kill all daemon processes (for stale/zombie processes)
|
||||
playwright-cli kill-all
|
||||
|
||||
# Delete browser session user data (profile directory)
|
||||
playwright-cli delete-data # delete default browser data
|
||||
playwright-cli -s=mysession delete-data # delete named browser data
|
||||
```
|
||||
|
||||
## Environment Variable
|
||||
|
||||
Set a default browser session name via environment variable:
|
||||
|
||||
```bash
|
||||
export PLAYWRIGHT_CLI_SESSION="mysession"
|
||||
playwright-cli open example.com # Uses "mysession" automatically
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Concurrent Scraping
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Scrape multiple sites concurrently
|
||||
|
||||
# Start all browsers
|
||||
playwright-cli -s=site1 open https://site1.com &
|
||||
playwright-cli -s=site2 open https://site2.com &
|
||||
playwright-cli -s=site3 open https://site3.com &
|
||||
wait
|
||||
|
||||
# Take snapshots from each
|
||||
playwright-cli -s=site1 snapshot
|
||||
playwright-cli -s=site2 snapshot
|
||||
playwright-cli -s=site3 snapshot
|
||||
|
||||
# Cleanup
|
||||
playwright-cli close-all
|
||||
```
|
||||
|
||||
### A/B Testing Sessions
|
||||
|
||||
```bash
|
||||
# Test different user experiences
|
||||
playwright-cli -s=variant-a open "https://app.com?variant=a"
|
||||
playwright-cli -s=variant-b open "https://app.com?variant=b"
|
||||
|
||||
# Compare
|
||||
playwright-cli -s=variant-a screenshot
|
||||
playwright-cli -s=variant-b screenshot
|
||||
```
|
||||
|
||||
### Persistent Profile
|
||||
|
||||
By default, the browser profile is kept in memory only. Use the `--persistent` flag on `open` to persist the browser profile to disk:
|
||||
|
||||
```bash
|
||||
# Use persistent profile (auto-generated location)
|
||||
playwright-cli open https://example.com --persistent
|
||||
|
||||
# Use persistent profile with custom directory
|
||||
playwright-cli open https://example.com --profile=/path/to/profile
|
||||
```
|
||||
|
||||
## Default Browser Session
|
||||
|
||||
When `-s` is omitted, commands use the default browser session:
|
||||
|
||||
```bash
|
||||
# These use the same default browser session
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli snapshot
|
||||
playwright-cli close # Stops default browser
|
||||
```
|
||||
|
||||
## Browser Session Configuration
|
||||
|
||||
Configure a browser session with specific settings when opening:
|
||||
|
||||
```bash
|
||||
# Open with config file
|
||||
playwright-cli open https://example.com --config=.playwright/my-cli.json
|
||||
|
||||
# Open with specific browser
|
||||
playwright-cli open https://example.com --browser=firefox
|
||||
|
||||
# Open in headed mode
|
||||
playwright-cli open https://example.com --headed
|
||||
|
||||
# Open with persistent profile
|
||||
playwright-cli open https://example.com --persistent
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Name Browser Sessions Semantically
|
||||
|
||||
```bash
|
||||
# GOOD: Clear purpose
|
||||
playwright-cli -s=github-auth open https://github.com
|
||||
playwright-cli -s=docs-scrape open https://docs.example.com
|
||||
|
||||
# AVOID: Generic names
|
||||
playwright-cli -s=s1 open https://github.com
|
||||
```
|
||||
|
||||
### 2. Always Clean Up
|
||||
|
||||
```bash
|
||||
# Stop browsers when done
|
||||
playwright-cli -s=auth close
|
||||
playwright-cli -s=scrape close
|
||||
|
||||
# Or stop all at once
|
||||
playwright-cli close-all
|
||||
|
||||
# If browsers become unresponsive or zombie processes remain
|
||||
playwright-cli kill-all
|
||||
```
|
||||
|
||||
### 3. Delete Stale Browser Data
|
||||
|
||||
```bash
|
||||
# Remove old browser data to free disk space
|
||||
playwright-cli -s=oldsession delete-data
|
||||
```
|
||||
@@ -0,0 +1,275 @@
|
||||
# Storage Management
|
||||
|
||||
Manage cookies, localStorage, sessionStorage, and browser storage state.
|
||||
|
||||
## Storage State
|
||||
|
||||
Save and restore complete browser state including cookies and storage.
|
||||
|
||||
### Save Storage State
|
||||
|
||||
```bash
|
||||
# Save to auto-generated filename (storage-state-{timestamp}.json)
|
||||
playwright-cli state-save
|
||||
|
||||
# Save to specific filename
|
||||
playwright-cli state-save my-auth-state.json
|
||||
```
|
||||
|
||||
### Restore Storage State
|
||||
|
||||
```bash
|
||||
# Load storage state from file
|
||||
playwright-cli state-load my-auth-state.json
|
||||
|
||||
# Open the page to apply the restored cookies
|
||||
playwright-cli open https://example.com
|
||||
```
|
||||
|
||||
### Storage State File Format
|
||||
|
||||
The saved file contains:
|
||||
|
||||
```json
|
||||
{
|
||||
"cookies": [
|
||||
{
|
||||
"name": "session_id",
|
||||
"value": "abc123",
|
||||
"domain": "example.com",
|
||||
"path": "/",
|
||||
"expires": 1735689600,
|
||||
"httpOnly": true,
|
||||
"secure": true,
|
||||
"sameSite": "Lax"
|
||||
}
|
||||
],
|
||||
"origins": [
|
||||
{
|
||||
"origin": "https://example.com",
|
||||
"localStorage": [
|
||||
{ "name": "theme", "value": "dark" },
|
||||
{ "name": "user_id", "value": "12345" }
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Cookies
|
||||
|
||||
### List All Cookies
|
||||
|
||||
```bash
|
||||
playwright-cli cookie-list
|
||||
```
|
||||
|
||||
### Filter Cookies by Domain
|
||||
|
||||
```bash
|
||||
playwright-cli cookie-list --domain=example.com
|
||||
```
|
||||
|
||||
### Filter Cookies by Path
|
||||
|
||||
```bash
|
||||
playwright-cli cookie-list --path=/api
|
||||
```
|
||||
|
||||
### Get Specific Cookie
|
||||
|
||||
```bash
|
||||
playwright-cli cookie-get session_id
|
||||
```
|
||||
|
||||
### Set a Cookie
|
||||
|
||||
```bash
|
||||
# Basic cookie
|
||||
playwright-cli cookie-set session abc123
|
||||
|
||||
# Cookie with options
|
||||
playwright-cli cookie-set session abc123 --domain=example.com --path=/ --httpOnly --secure --sameSite=Lax
|
||||
|
||||
# Cookie with expiration (Unix timestamp)
|
||||
playwright-cli cookie-set remember_me token123 --expires=1735689600
|
||||
```
|
||||
|
||||
### Delete a Cookie
|
||||
|
||||
```bash
|
||||
playwright-cli cookie-delete session_id
|
||||
```
|
||||
|
||||
### Clear All Cookies
|
||||
|
||||
```bash
|
||||
playwright-cli cookie-clear
|
||||
```
|
||||
|
||||
### Advanced: Multiple Cookies or Custom Options
|
||||
|
||||
For complex scenarios like adding multiple cookies at once, use `run-code`:
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.context().addCookies([
|
||||
{ name: 'session_id', value: 'sess_abc123', domain: 'example.com', path: '/', httpOnly: true },
|
||||
{ name: 'preferences', value: JSON.stringify({ theme: 'dark' }), domain: 'example.com', path: '/' }
|
||||
]);
|
||||
}"
|
||||
```
|
||||
|
||||
## Local Storage
|
||||
|
||||
### List All localStorage Items
|
||||
|
||||
```bash
|
||||
playwright-cli localstorage-list
|
||||
```
|
||||
|
||||
### Get Single Value
|
||||
|
||||
```bash
|
||||
playwright-cli localstorage-get token
|
||||
```
|
||||
|
||||
### Set Value
|
||||
|
||||
```bash
|
||||
playwright-cli localstorage-set theme dark
|
||||
```
|
||||
|
||||
### Set JSON Value
|
||||
|
||||
```bash
|
||||
playwright-cli localstorage-set user_settings '{"theme":"dark","language":"en"}'
|
||||
```
|
||||
|
||||
### Delete Single Item
|
||||
|
||||
```bash
|
||||
playwright-cli localstorage-delete token
|
||||
```
|
||||
|
||||
### Clear All localStorage
|
||||
|
||||
```bash
|
||||
playwright-cli localstorage-clear
|
||||
```
|
||||
|
||||
### Advanced: Multiple Operations
|
||||
|
||||
For complex scenarios like setting multiple values at once, use `run-code`:
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.evaluate(() => {
|
||||
localStorage.setItem('token', 'jwt_abc123');
|
||||
localStorage.setItem('user_id', '12345');
|
||||
localStorage.setItem('expires_at', Date.now() + 3600000);
|
||||
});
|
||||
}"
|
||||
```
|
||||
|
||||
## Session Storage
|
||||
|
||||
### List All sessionStorage Items
|
||||
|
||||
```bash
|
||||
playwright-cli sessionstorage-list
|
||||
```
|
||||
|
||||
### Get Single Value
|
||||
|
||||
```bash
|
||||
playwright-cli sessionstorage-get form_data
|
||||
```
|
||||
|
||||
### Set Value
|
||||
|
||||
```bash
|
||||
playwright-cli sessionstorage-set step 3
|
||||
```
|
||||
|
||||
### Delete Single Item
|
||||
|
||||
```bash
|
||||
playwright-cli sessionstorage-delete step
|
||||
```
|
||||
|
||||
### Clear sessionStorage
|
||||
|
||||
```bash
|
||||
playwright-cli sessionstorage-clear
|
||||
```
|
||||
|
||||
## IndexedDB
|
||||
|
||||
### List Databases
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
return await page.evaluate(async () => {
|
||||
const databases = await indexedDB.databases();
|
||||
return databases;
|
||||
});
|
||||
}"
|
||||
```
|
||||
|
||||
### Delete Database
|
||||
|
||||
```bash
|
||||
playwright-cli run-code "async page => {
|
||||
await page.evaluate(() => {
|
||||
indexedDB.deleteDatabase('myDatabase');
|
||||
});
|
||||
}"
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Authentication State Reuse
|
||||
|
||||
```bash
|
||||
# Step 1: Login and save state
|
||||
playwright-cli open https://app.example.com/login
|
||||
playwright-cli snapshot
|
||||
playwright-cli fill e1 "user@example.com"
|
||||
playwright-cli fill e2 "password123"
|
||||
playwright-cli click e3
|
||||
|
||||
# Save the authenticated state
|
||||
playwright-cli state-save auth.json
|
||||
|
||||
# Step 2: Later, restore state and skip login
|
||||
playwright-cli state-load auth.json
|
||||
playwright-cli open https://app.example.com/dashboard
|
||||
# Already logged in!
|
||||
```
|
||||
|
||||
### Save and Restore Roundtrip
|
||||
|
||||
```bash
|
||||
# Set up authentication state
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli eval "() => { document.cookie = 'session=abc123'; localStorage.setItem('user', 'john'); }"
|
||||
|
||||
# Save state to file
|
||||
playwright-cli state-save my-session.json
|
||||
|
||||
# ... later, in a new session ...
|
||||
|
||||
# Restore state
|
||||
playwright-cli state-load my-session.json
|
||||
playwright-cli open https://example.com
|
||||
# Cookies and localStorage are restored!
|
||||
```
|
||||
|
||||
## Security Notes
|
||||
|
||||
- Never commit storage state files containing auth tokens
|
||||
- Add `*.auth-state.json` to `.gitignore`
|
||||
- Delete state files after automation completes
|
||||
- Use environment variables for sensitive data
|
||||
- By default, sessions run in-memory mode which is safer for sensitive operations
|
||||
@@ -0,0 +1,88 @@
|
||||
# Test Generation
|
||||
|
||||
Generate Playwright test code automatically as you interact with the browser.
|
||||
|
||||
## How It Works
|
||||
|
||||
Every action you perform with `playwright-cli` generates corresponding Playwright TypeScript code.
|
||||
This code appears in the output and can be copied directly into your test files.
|
||||
|
||||
## Example Workflow
|
||||
|
||||
```bash
|
||||
# Start a session
|
||||
playwright-cli open https://example.com/login
|
||||
|
||||
# Take a snapshot to see elements
|
||||
playwright-cli snapshot
|
||||
# Output shows: e1 [textbox "Email"], e2 [textbox "Password"], e3 [button "Sign In"]
|
||||
|
||||
# Fill form fields - generates code automatically
|
||||
playwright-cli fill e1 "user@example.com"
|
||||
# Ran Playwright code:
|
||||
# await page.getByRole('textbox', { name: 'Email' }).fill('user@example.com');
|
||||
|
||||
playwright-cli fill e2 "password123"
|
||||
# Ran Playwright code:
|
||||
# await page.getByRole('textbox', { name: 'Password' }).fill('password123');
|
||||
|
||||
playwright-cli click e3
|
||||
# Ran Playwright code:
|
||||
# await page.getByRole('button', { name: 'Sign In' }).click();
|
||||
```
|
||||
|
||||
## Building a Test File
|
||||
|
||||
Collect the generated code into a Playwright test:
|
||||
|
||||
```typescript
|
||||
import { test, expect } from '@playwright/test';
|
||||
|
||||
test('login flow', async ({ page }) => {
|
||||
// Generated code from playwright-cli session:
|
||||
await page.goto('https://example.com/login');
|
||||
await page.getByRole('textbox', { name: 'Email' }).fill('user@example.com');
|
||||
await page.getByRole('textbox', { name: 'Password' }).fill('password123');
|
||||
await page.getByRole('button', { name: 'Sign In' }).click();
|
||||
|
||||
// Add assertions
|
||||
await expect(page).toHaveURL(/.*dashboard/);
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Use Semantic Locators
|
||||
|
||||
The generated code uses role-based locators when possible, which are more resilient:
|
||||
|
||||
```typescript
|
||||
// Generated (good - semantic)
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
|
||||
// Avoid (fragile - CSS selectors)
|
||||
await page.locator('#submit-btn').click();
|
||||
```
|
||||
|
||||
### 2. Explore Before Recording
|
||||
|
||||
Take snapshots to understand the page structure before recording actions:
|
||||
|
||||
```bash
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli snapshot
|
||||
# Review the element structure
|
||||
playwright-cli click e5
|
||||
```
|
||||
|
||||
### 3. Add Assertions Manually
|
||||
|
||||
Generated code captures actions but not assertions. Add expectations in your test:
|
||||
|
||||
```typescript
|
||||
// Generated action
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
|
||||
// Manual assertion
|
||||
await expect(page.getByText('Success')).toBeVisible();
|
||||
```
|
||||
139
tests/e2e/.claude/skills/playwright-cli/references/tracing.md
Normal file
139
tests/e2e/.claude/skills/playwright-cli/references/tracing.md
Normal file
@@ -0,0 +1,139 @@
|
||||
# Tracing
|
||||
|
||||
Capture detailed execution traces for debugging and analysis. Traces include DOM snapshots, screenshots, network activity, and console logs.
|
||||
|
||||
## Basic Usage
|
||||
|
||||
```bash
|
||||
# Start trace recording
|
||||
playwright-cli tracing-start
|
||||
|
||||
# Perform actions
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli click e1
|
||||
playwright-cli fill e2 "test"
|
||||
|
||||
# Stop trace recording
|
||||
playwright-cli tracing-stop
|
||||
```
|
||||
|
||||
## Trace Output Files
|
||||
|
||||
When you start tracing, Playwright creates a `traces/` directory with several files:
|
||||
|
||||
### `trace-{timestamp}.trace`
|
||||
|
||||
**Action log** - The main trace file containing:
|
||||
- Every action performed (clicks, fills, navigations)
|
||||
- DOM snapshots before and after each action
|
||||
- Screenshots at each step
|
||||
- Timing information
|
||||
- Console messages
|
||||
- Source locations
|
||||
|
||||
### `trace-{timestamp}.network`
|
||||
|
||||
**Network log** - Complete network activity:
|
||||
- All HTTP requests and responses
|
||||
- Request headers and bodies
|
||||
- Response headers and bodies
|
||||
- Timing (DNS, connect, TLS, TTFB, download)
|
||||
- Resource sizes
|
||||
- Failed requests and errors
|
||||
|
||||
### `resources/`
|
||||
|
||||
**Resources directory** - Cached resources:
|
||||
- Images, fonts, stylesheets, scripts
|
||||
- Response bodies for replay
|
||||
- Assets needed to reconstruct page state
|
||||
|
||||
## What Traces Capture
|
||||
|
||||
| Category | Details |
|
||||
|----------|---------|
|
||||
| **Actions** | Clicks, fills, hovers, keyboard input, navigations |
|
||||
| **DOM** | Full DOM snapshot before/after each action |
|
||||
| **Screenshots** | Visual state at each step |
|
||||
| **Network** | All requests, responses, headers, bodies, timing |
|
||||
| **Console** | All console.log, warn, error messages |
|
||||
| **Timing** | Precise timing for each operation |
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Debugging Failed Actions
|
||||
|
||||
```bash
|
||||
playwright-cli tracing-start
|
||||
playwright-cli open https://app.example.com
|
||||
|
||||
# This click fails - why?
|
||||
playwright-cli click e5
|
||||
|
||||
playwright-cli tracing-stop
|
||||
# Open trace to see DOM state when click was attempted
|
||||
```
|
||||
|
||||
### Analyzing Performance
|
||||
|
||||
```bash
|
||||
playwright-cli tracing-start
|
||||
playwright-cli open https://slow-site.com
|
||||
playwright-cli tracing-stop
|
||||
|
||||
# View network waterfall to identify slow resources
|
||||
```
|
||||
|
||||
### Capturing Evidence
|
||||
|
||||
```bash
|
||||
# Record a complete user flow for documentation
|
||||
playwright-cli tracing-start
|
||||
|
||||
playwright-cli open https://app.example.com/checkout
|
||||
playwright-cli fill e1 "4111111111111111"
|
||||
playwright-cli fill e2 "12/25"
|
||||
playwright-cli fill e3 "123"
|
||||
playwright-cli click e4
|
||||
|
||||
playwright-cli tracing-stop
|
||||
# Trace shows exact sequence of events
|
||||
```
|
||||
|
||||
## Trace vs Video vs Screenshot
|
||||
|
||||
| Feature | Trace | Video | Screenshot |
|
||||
|---------|-------|-------|------------|
|
||||
| **Format** | .trace file | .webm video | .png/.jpeg image |
|
||||
| **DOM inspection** | Yes | No | No |
|
||||
| **Network details** | Yes | No | No |
|
||||
| **Step-by-step replay** | Yes | Continuous | Single frame |
|
||||
| **File size** | Medium | Large | Small |
|
||||
| **Best for** | Debugging | Demos | Quick capture |
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Start Tracing Before the Problem
|
||||
|
||||
```bash
|
||||
# Trace the entire flow, not just the failing step
|
||||
playwright-cli tracing-start
|
||||
playwright-cli open https://example.com
|
||||
# ... all steps leading to the issue ...
|
||||
playwright-cli tracing-stop
|
||||
```
|
||||
|
||||
### 2. Clean Up Old Traces
|
||||
|
||||
Traces can consume significant disk space:
|
||||
|
||||
```bash
|
||||
# Remove traces older than 7 days
|
||||
find .playwright-cli/traces -type f -mtime +7 -delete
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
- Traces add overhead to automation
|
||||
- Large traces can consume significant disk space
|
||||
- Some dynamic content may not replay perfectly
|
||||
@@ -0,0 +1,43 @@
|
||||
# Video Recording
|
||||
|
||||
Capture browser automation sessions as video for debugging, documentation, or verification. Produces WebM (VP8/VP9 codec).
|
||||
|
||||
## Basic Recording
|
||||
|
||||
```bash
|
||||
# Start recording
|
||||
playwright-cli video-start
|
||||
|
||||
# Perform actions
|
||||
playwright-cli open https://example.com
|
||||
playwright-cli snapshot
|
||||
playwright-cli click e1
|
||||
playwright-cli fill e2 "test input"
|
||||
|
||||
# Stop and save
|
||||
playwright-cli video-stop demo.webm
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Use Descriptive Filenames
|
||||
|
||||
```bash
|
||||
# Include context in filename
|
||||
playwright-cli video-stop recordings/login-flow-2024-01-15.webm
|
||||
playwright-cli video-stop recordings/checkout-test-run-42.webm
|
||||
```
|
||||
|
||||
## Tracing vs Video
|
||||
|
||||
| Feature | Video | Tracing |
|
||||
|---------|-------|---------|
|
||||
| Output | WebM file | Trace file (viewable in Trace Viewer) |
|
||||
| Shows | Visual recording | DOM snapshots, network, console, actions |
|
||||
| Use case | Demos, documentation | Debugging, analysis |
|
||||
| Size | Larger | Smaller |
|
||||
|
||||
## Limitations
|
||||
|
||||
- Recording adds slight overhead to automation
|
||||
- Large recordings can consume significant disk space
|
||||
15
tests/e2e/.env.example
Normal file
15
tests/e2e/.env.example
Normal file
@@ -0,0 +1,15 @@
|
||||
# Copy this to .env and fill in values for staging-mode runs.
|
||||
#
|
||||
# This file (.env) holds user-provided defaults — staging credentials, role
|
||||
# override. It is loaded by playwright.config.ts via dotenv.
|
||||
#
|
||||
# Local-mode runs (`cd tests && uv run pytest ... e2e/bootstrap/setup.py::test_setup`)
|
||||
# bring up a containerized backend and write .env.local, which overrides .env.
|
||||
# You do NOT need to touch this file for local mode.
|
||||
|
||||
# Staging base URL (set to opt out of local backend bring-up)
|
||||
SIGNOZ_E2E_BASE_URL=https://app.us.staging.signoz.cloud
|
||||
|
||||
# Test credentials (required only when SIGNOZ_E2E_BASE_URL is set — i.e. staging mode)
|
||||
SIGNOZ_E2E_USERNAME=
|
||||
SIGNOZ_E2E_PASSWORD=
|
||||
38
tests/e2e/.eslintignore
Normal file
38
tests/e2e/.eslintignore
Normal file
@@ -0,0 +1,38 @@
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
|
||||
# Test results
|
||||
test-results/
|
||||
playwright-report/
|
||||
coverage/
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env.production
|
||||
|
||||
# Editor files
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# Runtime data
|
||||
pids
|
||||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
68
tests/e2e/.eslintrc.js
Normal file
68
tests/e2e/.eslintrc.js
Normal file
@@ -0,0 +1,68 @@
|
||||
module.exports = {
|
||||
parser: '@typescript-eslint/parser',
|
||||
parserOptions: {
|
||||
ecmaVersion: 2022,
|
||||
sourceType: 'module',
|
||||
},
|
||||
extends: [
|
||||
'eslint:recommended',
|
||||
'plugin:@typescript-eslint/recommended',
|
||||
'plugin:playwright/recommended',
|
||||
],
|
||||
env: {
|
||||
node: true,
|
||||
es2022: true,
|
||||
},
|
||||
rules: {
|
||||
// Code Quality
|
||||
'@typescript-eslint/no-unused-vars': 'error',
|
||||
'@typescript-eslint/no-explicit-any': 'warn',
|
||||
'prefer-const': 'error',
|
||||
'no-var': 'error',
|
||||
|
||||
// Formatting Rules (ESLint handles formatting)
|
||||
'semi': ['error', 'always'],
|
||||
'quotes': ['error', 'single', { avoidEscape: true }],
|
||||
'comma-dangle': ['error', 'always-multiline'],
|
||||
'indent': ['error', 2, { SwitchCase: 1 }],
|
||||
'object-curly-spacing': ['error', 'always'],
|
||||
'array-bracket-spacing': ['error', 'never'],
|
||||
'space-before-function-paren': ['error', {
|
||||
anonymous: 'always',
|
||||
named: 'never',
|
||||
asyncArrow: 'always',
|
||||
}],
|
||||
'keyword-spacing': 'error',
|
||||
'space-infix-ops': 'error',
|
||||
'eol-last': 'error',
|
||||
'no-trailing-spaces': 'error',
|
||||
'no-multiple-empty-lines': ['error', { max: 2, maxEOF: 1 }],
|
||||
|
||||
// Playwright-specific (enhanced)
|
||||
'playwright/expect-expect': 'error',
|
||||
'playwright/no-conditional-in-test': 'error',
|
||||
'playwright/no-page-pause': 'error',
|
||||
'playwright/no-wait-for-timeout': 'warn',
|
||||
'playwright/prefer-web-first-assertions': 'error',
|
||||
|
||||
// Console usage
|
||||
'no-console': ['warn', { allow: ['warn', 'error'] }],
|
||||
},
|
||||
overrides: [
|
||||
{
|
||||
// Config files can use console and have relaxed formatting
|
||||
files: ['*.config.{js,ts}', 'playwright.config.ts'],
|
||||
rules: {
|
||||
'no-console': 'off',
|
||||
'@typescript-eslint/no-explicit-any': 'off',
|
||||
},
|
||||
},
|
||||
{
|
||||
// Test files specific rules
|
||||
files: ['**/*.spec.ts', '**/*.test.ts'],
|
||||
rules: {
|
||||
'@typescript-eslint/no-explicit-any': 'off', // Page objects often need any
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
24
tests/e2e/.gitignore
vendored
Normal file
24
tests/e2e/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
node_modules/
|
||||
# All Playwright output — HTML report, JSON summary, per-test traces /
|
||||
# screenshots / videos. Set via outputDir + reporter paths in playwright.config.ts.
|
||||
/artifacts/
|
||||
/playwright/.cache/
|
||||
.env
|
||||
dist/
|
||||
*.log
|
||||
yarn-error.log
|
||||
.yarn/cache
|
||||
.yarn/install-state.gz
|
||||
.vscode/
|
||||
|
||||
# playwright-cli artifacts (snapshots, screenshots, videos, traces)
|
||||
.playwright-cli/
|
||||
|
||||
# backend coordinates written by the pytest bootstrap (bootstrap/setup.py);
|
||||
# loaded by playwright.config.ts via dotenv override.
|
||||
.env.local
|
||||
|
||||
# AI test-planner scratch (playwright-test-planner writes markdown plans
|
||||
# here before the generator turns them into .spec.ts files; the tests are
|
||||
# the source of truth, plans are regenerable).
|
||||
specs/
|
||||
30
tests/e2e/.prettierignore
Normal file
30
tests/e2e/.prettierignore
Normal file
@@ -0,0 +1,30 @@
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Generated test outputs
|
||||
artifacts/
|
||||
playwright/.cache/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env*.local
|
||||
|
||||
# Lock files
|
||||
yarn.lock
|
||||
package-lock.json
|
||||
pnpm-lock.yaml
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
yarn-error.log
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
|
||||
# Other
|
||||
.DS_Store
|
||||
6
tests/e2e/.prettierrc.json
Normal file
6
tests/e2e/.prettierrc.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"useTabs": false,
|
||||
"tabWidth": 2,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "all"
|
||||
}
|
||||
36
tests/e2e/bootstrap/run.py
Normal file
36
tests/e2e/bootstrap/run.py
Normal file
@@ -0,0 +1,36 @@
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
|
||||
|
||||
def test_e2e(
    signoz: types.SigNoz,
    create_user_admin: types.Operation,  # pylint: disable=unused-argument
    seeder: types.TestContainerDocker,
) -> None:
    """
    Single-command e2e entrypoint (the primary CI path).

    pytest provisions the SigNoz backend and starts the HTTP seeder
    container, then this test shells out to ``yarn test`` so the
    Playwright suite runs against the provisioned instance. Specs create
    their own data through the seeder — nothing is pre-seeded here.
    """
    # bootstrap/run.py -> tests/e2e/
    playwright_dir = Path(__file__).resolve().parents[1]
    backend = signoz.self.host_configs["8080"]
    seed_api = seeder.host_configs["8080"]

    # Hand the Playwright side its coordinates via the environment;
    # everything else is inherited from the current process.
    child_env = dict(os.environ)
    child_env.update(
        {
            "SIGNOZ_E2E_BASE_URL": backend.base(),
            "SIGNOZ_E2E_USERNAME": USER_ADMIN_EMAIL,
            "SIGNOZ_E2E_PASSWORD": USER_ADMIN_PASSWORD,
            "SIGNOZ_E2E_SEEDER_URL": seed_api.base(),
        }
    )

    # check=False: report failure as a pytest assertion (with the exit
    # code in the message) rather than raising CalledProcessError.
    proc = subprocess.run(
        ["yarn", "test"],
        cwd=str(playwright_dir),
        env=child_env,
        check=False,
    )
    assert proc.returncode == 0, f"Playwright exited with code {proc.returncode}"
|
||||
46
tests/e2e/bootstrap/setup.py
Normal file
46
tests/e2e/bootstrap/setup.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
|
||||
|
||||
def _env_file() -> Path:
    """Resolve the path the generated env file is written to.

    Honors a non-empty ``SIGNOZ_E2E_ENV_FILE`` override; otherwise
    defaults to ``.env.local`` at the suite root
    (tests/e2e/bootstrap/setup.py -> tests/e2e/.env.local).
    """
    custom = os.environ.get("SIGNOZ_E2E_ENV_FILE", "")
    default = Path(__file__).resolve().parents[1] / ".env.local"
    return Path(custom) if custom else default
|
||||
|
||||
|
||||
def test_setup(
    signoz: types.SigNoz,
    create_user_admin: types.Operation,  # pylint: disable=unused-argument
    seeder: types.TestContainerDocker,
) -> None:
    """
    Provision the backend for Playwright and persist its coordinates.

    Brings SigNoz up, registers the admin (via the create_user_admin
    fixture), starts the HTTP seeder container, and writes the base URL,
    credentials, and seeder URL to .env.local (loaded by
    playwright.config.ts via dotenv; it overrides the user-owned .env).
    No global pre-seed happens here — each spec owns its own data via
    the seeder.
    """
    backend = signoz.self.host_configs["8080"]
    seed_api = seeder.host_configs["8080"]

    target = _env_file()
    target.parent.mkdir(parents=True, exist_ok=True)
    lines = [
        "# Generated by tests/e2e/bootstrap/setup.py — do not edit.",
        f"SIGNOZ_E2E_BASE_URL={backend.base()}",
        f"SIGNOZ_E2E_USERNAME={USER_ADMIN_EMAIL}",
        f"SIGNOZ_E2E_PASSWORD={USER_ADMIN_PASSWORD}",
        f"SIGNOZ_E2E_SEEDER_URL={seed_api.base()}",
    ]
    # Trailing newline keeps the file identical to the original
    # line-by-line write_text format.
    target.write_text("\n".join(lines) + "\n")
|
||||
|
||||
|
||||
def test_teardown(
    signoz: types.SigNoz,  # pylint: disable=unused-argument
    create_user_admin: types.Operation,  # pylint: disable=unused-argument
    seeder: types.TestContainerDocker,  # pylint: disable=unused-argument
) -> None:
    """Companion to test_setup — invoked with --teardown to free containers.

    The body is intentionally empty: requesting the same fixtures as
    test_setup is the whole point — presumably their finalizers release
    the containers when run under --teardown (confirm against the
    fixtures package).
    """
|
||||
83
tests/e2e/fixtures/auth.ts
Normal file
83
tests/e2e/fixtures/auth.ts
Normal file
@@ -0,0 +1,83 @@
|
||||
import {
|
||||
test as base,
|
||||
expect,
|
||||
type Browser,
|
||||
type BrowserContext,
|
||||
type Page,
|
||||
} from '@playwright/test';
|
||||
|
||||
// Login identity consumed by the auth fixtures below.
export type User = { email: string; password: string };

// Default user — admin from the pytest bootstrap (.env.local) or staging .env.
// The `!` non-null assertions are deliberate: the env vars may be unset at
// module load; login() rejects empty credentials with an actionable error.
export const ADMIN: User = {
  email: process.env.SIGNOZ_E2E_USERNAME!,
  password: process.env.SIGNOZ_E2E_PASSWORD!,
};

// Per-worker storageState cache. One login per unique user per worker.
// Promise-valued so concurrent requests share the same in-flight work.
// Held in memory only — no .auth/ dir, no JSON on disk.
// Keyed by user email; the value resolves to the serialized state captured
// by BrowserContext.storageState().
type StorageState = Awaited<ReturnType<BrowserContext['storageState']>>;
const storageByUser = new Map<string, Promise<StorageState>>();
|
||||
|
||||
async function storageFor(browser: Browser, user: User): Promise<StorageState> {
|
||||
const cached = storageByUser.get(user.email);
|
||||
if (cached) return cached;
|
||||
|
||||
const task = (async () => {
|
||||
const ctx = await browser.newContext();
|
||||
const page = await ctx.newPage();
|
||||
await login(page, user);
|
||||
const state = await ctx.storageState();
|
||||
await ctx.close();
|
||||
return state;
|
||||
})();
|
||||
|
||||
storageByUser.set(user.email, task);
|
||||
return task;
|
||||
}
|
||||
|
||||
async function login(page: Page, user: User): Promise<void> {
|
||||
if (!user.email || !user.password) {
|
||||
throw new Error(
|
||||
'User credentials missing. Set SIGNOZ_E2E_USERNAME / SIGNOZ_E2E_PASSWORD ' +
|
||||
'(pytest bootstrap writes them to .env.local), or pass a User via test.use({ user: ... }).',
|
||||
);
|
||||
}
|
||||
await page.goto('/login?password=Y');
|
||||
await page.getByTestId('email').fill(user.email);
|
||||
await page.getByTestId('initiate_login').click();
|
||||
await page.getByTestId('password').fill(user.password);
|
||||
await page.getByRole('button', { name: 'Sign in with Password' }).click();
|
||||
await page
|
||||
.getByText('Hello there, Welcome to your')
|
||||
.waitFor({ state: 'visible' });
|
||||
}
|
||||
|
||||
export const test = base.extend<{
|
||||
/**
|
||||
* User identity for this test. Override with `test.use({ user: ... })` at
|
||||
* the describe or test level to run the suite as a different user.
|
||||
* Defaults to ADMIN (the pytest-bootstrap-seeded admin).
|
||||
*/
|
||||
user: User;
|
||||
|
||||
/**
|
||||
* A Page whose context is already authenticated as `user`. First request
|
||||
* for a given user triggers one login per worker; the resulting
|
||||
* storageState is held in memory and reused for all later requests.
|
||||
*/
|
||||
authedPage: Page;
|
||||
}>({
|
||||
user: [ADMIN, { option: true }],
|
||||
|
||||
authedPage: async ({ browser, user }, use) => {
|
||||
const storageState = await storageFor(browser, user);
|
||||
const ctx = await browser.newContext({ storageState });
|
||||
const page = await ctx.newPage();
|
||||
await use(page);
|
||||
await ctx.close();
|
||||
},
|
||||
});
|
||||
|
||||
export { expect };
|
||||
736
tests/e2e/legacy/alerts/alerts-downtime.spec.ts
Normal file
736
tests/e2e/legacy/alerts/alerts-downtime.spec.ts
Normal file
@@ -0,0 +1,736 @@
|
||||
// Playwright replay of platform-pod/issues/2095 alerts + planned-downtime
|
||||
// regression suite. Derived from run-3 selectors.
|
||||
//
|
||||
// Run: yarn test tests/alerts-downtime/alerts-downtime.spec.ts
|
||||
// baseURL + storageState come from playwright.config.ts; env is populated by
|
||||
// the pytest bootstrap (or .env for staging mode). The 2095 flows mutate
|
||||
// shared tenant state, so run them serially regardless of config-level
|
||||
// fullyParallel.
|
||||
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
const E2E_TAG = `e2e-2095-${Math.floor(Date.now() / 1000)}`;
|
||||
|
||||
test.describe('SUITE.md — platform-pod/issues/2095 regression', () => {
|
||||
// Serial: 2095 flows mutate shared tenant state (one flow's rules show up in
|
||||
// another flow's list; toasts from test A block clicks in test B).
|
||||
test.describe.configure({ mode: 'serial' });
|
||||
|
||||
test('Flow 1 — alerts list, toggle, delete (depends on Flow 2 create)', async ({ authedPage: page }) => {
|
||||
// Seed: create a rule via the list's 'New Alert Rule' flow.
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
|
||||
// Seed via direct fetch — UI metric/channel pickers are unreliable from the CLI too
|
||||
// (Ant Select onChange is brittle under test-runner speed). Same pattern as Flow 5.
|
||||
const seedId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: 'spec.ts flow-1', summary: 'spec.ts flow-1' },
|
||||
labels: {},
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-1', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`flow-1 seed POST: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-create` });
|
||||
void seedId; // rule id not needed for UI assertions below
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
|
||||
// Open action menu
|
||||
await page.locator('tbody tr', { hasText: `${E2E_TAG}-create` }).locator('.ant-dropdown-trigger, .dropdown-button').click();
|
||||
await expect(page.getByRole('menuitem', { name: /^disable$/i })).toBeVisible();
|
||||
|
||||
// Disable
|
||||
await page.getByRole('menuitem', { name: /^disable$/i }).click();
|
||||
await page.waitForResponse(r => r.url().includes('/api/v2/rules/') && r.request().method() === 'PATCH');
|
||||
await expect(page.locator('tbody tr', { hasText: `${E2E_TAG}-create` })).toContainText(/disabled/i);
|
||||
|
||||
// Enable
|
||||
await page.locator('tbody tr', { hasText: `${E2E_TAG}-create` }).locator('.ant-dropdown-trigger, .dropdown-button').click();
|
||||
await page.getByRole('menuitem', { name: /^enable$/i }).click();
|
||||
await page.waitForResponse(r => r.url().includes('/api/v2/rules/') && r.request().method() === 'PATCH');
|
||||
|
||||
// Delete
|
||||
await page.locator('tbody tr', { hasText: `${E2E_TAG}-create` }).locator('.ant-dropdown-trigger, .dropdown-button').click();
|
||||
await page.getByRole('menuitem', { name: /^delete$/i }).click();
|
||||
await page.waitForResponse(r => r.url().includes('/api/v2/rules/') && r.request().method() === 'DELETE');
|
||||
// Assert the specific E2E_TAG row is gone. A tenant-wide "no alert rules yet"
|
||||
// assertion is unreliable because other tests / leftover rules may coexist.
|
||||
await expect(page.locator('tbody tr', { hasText: `${E2E_TAG}-create` })).toHaveCount(0);
|
||||
});
|
||||
|
||||
test('Flow 2 — create, edit, clone, labels round-trip', async ({ authedPage: page }) => {
|
||||
// Navigate to establish the origin for localStorage/cookies before direct-fetch.
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
|
||||
// 2.8 — create with labels via direct fetch (metric/channel UI pickers are too brittle
|
||||
// in sequential CLI runs for load-bearing creates). We assert on the BE roundtrip.
|
||||
const labeledId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: `${name}-desc`, summary: `${name}-summary` },
|
||||
labels: { env: 'prod', severity: 'warn' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-2', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`flow-2 labels POST: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-labels` });
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
await expect(page.getByText(`${E2E_TAG}-labels`)).toBeVisible();
|
||||
|
||||
// 2.9 — hydration: visit the overview URL directly and confirm label pills render.
|
||||
await page.goto(`/alerts/overview?ruleId=${labeledId}`);
|
||||
await expect(page.getByTestId(/label-pill-env-prod/)).toBeVisible();
|
||||
await expect(page.getByTestId(/label-pill-severity-warn/)).toBeVisible();
|
||||
|
||||
// 2.10 — remove severity label via PUT (bypasses label-input remove-button UI which
|
||||
// relies on a testid that may not be present in edit mode across all versions).
|
||||
await page.evaluate(async ({ id, name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: `${name}-desc`, summary: `${name}-summary` },
|
||||
labels: { env: 'prod' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-2', version: 'v5',
|
||||
};
|
||||
await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
}, { id: labeledId, name: `${E2E_TAG}-labels` });
|
||||
await page.goto(`/alerts/overview?ruleId=${labeledId}`);
|
||||
await expect(page.getByTestId(/label-pill-env-prod/)).toBeVisible();
|
||||
await expect(page.getByTestId(/label-pill-severity-warn/)).toHaveCount(0);
|
||||
|
||||
// Cleanup
|
||||
await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
}, { id: labeledId });
|
||||
});
|
||||
|
||||
test('Flow 2 — Test Notification (2.11 success, 2.12 empty-result, 2.13 disabled-while-invalid)', async ({ authedPage: page }) => {
|
||||
await page.goto(`/alerts/new`);
|
||||
|
||||
// 2.13 disabled pre-state — fresh form, no name, no metric
|
||||
const testBtn = page.getByRole('button', { name: /test notification/i });
|
||||
await expect(testBtn).toBeDisabled();
|
||||
|
||||
// 2.11 / 2.12 — direct-fetch POST /api/v2/rules/test. Driving the V2 form's metric +
|
||||
// channel pickers via CLI is brittle (Ant Select onChange behavior varies); the API
|
||||
// contract is what matters for this flow's regression probe. UI-driven enable-after-fill
|
||||
// for 2.13 is covered via run-5's interactive replay.
|
||||
const buildTestBody = (target: number) => ({
|
||||
alert: `${E2E_TAG}-test-notif`,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: `${E2E_TAG}-test-notif`, summary: `${E2E_TAG}-test-notif` },
|
||||
labels: {},
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-2-test-notif', version: 'v5',
|
||||
});
|
||||
|
||||
const body211 = await page.evaluate(async (body) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch('/api/v2/rules/test', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return { status: resp.status, body: await resp.json() };
|
||||
}, buildTestBody(0));
|
||||
expect(body211.status).toBe(200);
|
||||
expect(body211.body.data.alertCount).toBeGreaterThan(0);
|
||||
|
||||
const body212 = await page.evaluate(async (body) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch('/api/v2/rules/test', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return { status: resp.status, body: await resp.json() };
|
||||
}, buildTestBody(1e18));
|
||||
expect(body212.status).toBe(200);
|
||||
// NOTE (run-5 finding): /api/v2/rules/test bypasses threshold evaluation via
|
||||
// WithSendUnmatched() (pkg/query-service/rules/test_notification.go:52-53), so an
|
||||
// unsatisfiable threshold still yields alertCount >= 1. Assert on the contract only.
|
||||
expect(body212.body.data).toHaveProperty('alertCount');
|
||||
});
|
||||
|
||||
test('Flow 3 — alert details and AlertNotFound', async ({ authedPage: page }) => {
  // Seed via direct fetch (same reasoning as Flow 1/2-main): Ant Select pickers are
  // unreliable to drive, and this flow tests the details page, not the create form.
  await page.goto(`/alerts?tab=AlertRules`);
  const ruleId = await page.evaluate(async ({ name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    // Minimal valid v2alpha1 threshold-rule payload (target 0 so the rule is well-formed;
    // whether it fires is irrelevant to the details-page assertions below).
    const body = {
      alert: name,
      alertType: 'METRIC_BASED_ALERT',
      ruleType: 'threshold_rule',
      condition: {
        thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
        compositeQuery: {
          queryType: 'builder', panelType: 'graph',
          queries: [{
            type: 'builder_query',
            spec: {
              name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
              filter: { expression: '' }, having: { expression: '' },
              aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
            },
          }],
        },
        selectedQueryName: 'A',
        alertOnAbsent: false,
        requireMinPoints: false,
      },
      annotations: { description: 'spec.ts flow-3', summary: 'spec.ts flow-3' },
      labels: { severity: 'warning' },
      notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
      evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
      schemaVersion: 'v2alpha1', source: 'spec.ts-flow-3', version: 'v5',
    };
    const resp = await fetch('/api/v2/rules', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    const json = await resp.json();
    // Fail fast with the server's response body so seeding failures are diagnosable.
    if (resp.status !== 201) throw new Error(`flow-3 seed POST: ${resp.status} ${JSON.stringify(json)}`);
    return json.data.id as string;
  }, { name: `${E2E_TAG}-details` });

  // 3.1–3.3 — valid overview + history
  // Either the classic header input or the V2 name input may render; accept both.
  await page.goto(`/alerts/overview?ruleId=${ruleId}`);
  await expect(page.locator('.alert-header__input, [data-testid=alert-name-input]')).toBeVisible();
  await page.getByRole('tab', { name: /history/i }).click();
  await expect(page.getByText(/total triggered/i)).toBeVisible();

  // 3.4 — bogus UUID: page should render the AlertNotFound view (document title changes).
  await page.goto(`/alerts/overview?ruleId=00000000-0000-0000-0000-000000000000`);
  await expect(page).toHaveTitle('Alert Not Found');

  // 3.5 — missing ruleId: not-found copy (apostrophe optional to tolerate curly quotes).
  await page.goto(`/alerts/overview`);
  await expect(page.getByText(/we couldn'?t find/i)).toBeVisible();

  // 3.6 — delete via direct fetch, then revisit the overview URL and expect AlertNotFound.
  // NOTE(review): the DELETE response status is not checked here — if the delete fails,
  // the final assertion is what catches it, just with a less specific error.
  await page.evaluate(async ({ id }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    await fetch(`/api/v2/rules/${id}`, { method: 'DELETE', headers: { Authorization: `Bearer ${token}` } });
  }, { id: ruleId });
  await page.goto(`/alerts/overview?ruleId=${ruleId}`);
  await expect(page).toHaveTitle('Alert Not Found');
});
|
||||
|
||||
test('Flow 4 — planned downtime CRUD', async ({ authedPage: page }) => {
  // 4.1a — direct URL.
  // The "no data" copy is tenant-state-dependent; assert the list renders (header row) instead.
  await page.goto(`/alerts?tab=Configuration&subTab=planned-downtime`);
  await expect(page.locator('table, .ant-spin').first()).toBeVisible();

  // 4.1b — tab click: same destination reached through the Configuration tab.
  await page.goto(`/alerts?tab=AlertRules`);
  await page.getByRole('tab', { name: /configuration/i }).click();
  await expect(page.locator('table, .ant-spin').first()).toBeVisible();

  // 4.3 — empty-form validation (click Add with just the name): the schedule's
  // "Ends on" field is required, so saving must surface its validation message.
  await page.getByRole('button', { name: /new downtime/i }).click();
  await page.locator('#create-form_name').fill(`${E2E_TAG}-downtime-once`);
  await page.getByRole('button', { name: /add downtime schedule/i }).click();
  await expect(page.getByText(/please enter ends on/i)).toBeVisible();

  // 4.2 — create via direct fetch.
  // The Ant DatePicker calendar-cell clicks are unreliable (cells-in-view index varies
  // across months; title-based selectors require tomorrow's date to be computed in the
  // displayed timezone). The 2095 refactor doesn't touch the DatePicker logic; UI-probing
  // this step adds flakiness without improving coverage. We skip the calendar UI and
  // POST directly. The list assertions below still verify the BE roundtrip.
  await page.keyboard.press('Escape'); // dismiss the half-filled modal from 4.3
  const downtimeId = await page.evaluate(async ({ name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const now = Date.now();
    // One-shot (non-recurring) 24h UTC window starting now.
    const body = {
      name,
      description: 'spec.ts downtime',
      schedule: {
        timezone: 'UTC',
        startTime: new Date(now).toISOString(),
        endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
        recurrence: null,
      },
      alertIds: [],
    };
    const resp = await fetch('/api/v1/downtime_schedules', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    const json = await resp.json();
    if (resp.status >= 300) throw new Error(`POST /downtime_schedules: ${resp.status} ${JSON.stringify(json)}`);
    // Tolerate both envelope shapes ({data:{id}} vs {id}) — presumably version-dependent; verify against BE.
    return json.data?.id ?? json.id;
  }, { name: `${E2E_TAG}-downtime-once` });
  await page.goto(`/alerts?tab=Configuration&subTab=planned-downtime`);
  // The downtime list uses accordion/card layout, not a real <tr>. Assert by visible text.
  await expect(page.getByText(`${E2E_TAG}-downtime-once`)).toBeVisible();

  // 4.4 — edit via direct fetch (same reasoning as 4.2: the pencil icon is a lucide SVG
  // that historically requires DOM injection to be reliably clickable — run-4 documented
  // this. UI-probing adds flake without covering 2095 refactor scope).
  // The PUT sends a full replacement body (only name/description change).
  await page.evaluate(async ({ id, name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const now = Date.now();
    const body = {
      name,
      description: 'spec.ts downtime edited',
      schedule: {
        timezone: 'UTC',
        startTime: new Date(now).toISOString(),
        endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
        recurrence: null,
      },
      alertIds: [],
    };
    const resp = await fetch(`/api/v1/downtime_schedules/${id}`, {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    if (resp.status >= 300) {
      const j = await resp.text();
      throw new Error(`PUT /downtime_schedules: ${resp.status} ${j}`);
    }
  }, { id: downtimeId, name: `${E2E_TAG}-downtime-edited` });
  await page.reload();
  await expect(page.getByText(`${E2E_TAG}-downtime-edited`)).toBeVisible();

  // 4.5 — delete via direct fetch; verify UI reflects the delete.
  await page.evaluate(async ({ id }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const resp = await fetch(`/api/v1/downtime_schedules/${id}`, {
      method: 'DELETE',
      headers: { Authorization: `Bearer ${token}` },
    });
    if (resp.status >= 300) throw new Error(`DELETE /downtime_schedules: ${resp.status}`);
  }, { id: downtimeId });
  await page.reload();
  await expect(page.getByText(`${E2E_TAG}-downtime-edited`)).toHaveCount(0);
});
|
||||
|
||||
test('Flow 6 — anomaly alerts (6.1 type-selection, 6.2 classic-form entry, 6.4 create, 6.5 edit z-score, 6.6 toggle, 6.7 delete, 6.8 AlertNotFound)', async ({ authedPage: page }) => {
  // 6.1 — type-selection page: the anomaly card renders with its Beta badge.
  await page.goto(`/alerts/type-selection`);
  const anomalyCard = page.getByTestId('alert-type-card-ANOMALY_BASED_ALERT');
  await expect(anomalyCard).toBeVisible();
  await expect(anomalyCard.getByText('Beta')).toBeVisible();

  // 6.2 — click Anomaly card → classic form with anomaly tab selected
  await anomalyCard.click();
  await page.waitForURL(/ruleType=anomaly_rule.*alertType=METRIC_BASED_ALERT/);
  const anomalyTabBtn = page.locator('button[value="anomaly_rule"]');
  await expect(anomalyTabBtn).toHaveClass(/selected/);
  // Confirm classic, not V2 (the V2 form has a distinctive footer element).
  expect(await page.locator('.create-alert-v2-footer').count()).toBe(0);

  // 6.4 — create via direct fetch (UI Ant Select metric/channel pickers are unreliable from MCP).
  // Pre-convert namedArgs → args:[{name,value}] because v5 builder spec rejects namedArgs.
  const ruleId = await page.evaluate(async ({ name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const body = {
      alert: name,
      alertType: 'METRIC_BASED_ALERT',
      ruleType: 'anomaly_rule',
      condition: {
        thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 3, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
        compositeQuery: {
          queryType: 'builder',
          panelType: 'graph',
          queries: [{
            type: 'builder_query',
            spec: {
              name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
              filter: { expression: '' }, having: { expression: '' },
              aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
              // anomaly function args: z-score threshold 3 (edited to 5 in 6.5 below)
              functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 3 }] }],
            },
          }],
        },
        selectedQueryName: 'A',
        alertOnAbsent: false,
        requireMinPoints: false,
        algorithm: 'standard',
        seasonality: 'hourly',
      },
      annotations: { description: 'spec.ts anomaly', summary: 'spec.ts anomaly' },
      labels: { severity: 'warning' },
      notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
      evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
      schemaVersion: 'v2alpha1',
      source: 'spec.ts-flow-6',
      version: 'v5',
    };
    const resp = await fetch('/api/v2/rules', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    const json = await resp.json();
    if (resp.status !== 201) throw new Error(`POST /api/v2/rules failed: ${resp.status} ${JSON.stringify(json)}`);
    return json.data.id as string;
  }, { name: `${E2E_TAG}-anomaly` });

  // 6.5 — PUT z-score 3→5 (full replacement body; threshold target and the anomaly
  // function's z_score_threshold both move from 3 to 5).
  await page.evaluate(async ({ id, name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const body = {
      alert: name,
      alertType: 'METRIC_BASED_ALERT',
      ruleType: 'anomaly_rule',
      condition: {
        thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 5, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
        compositeQuery: {
          queryType: 'builder', panelType: 'graph',
          queries: [{
            type: 'builder_query',
            spec: {
              name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
              filter: { expression: '' }, having: { expression: '' },
              aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
              functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 5 }] }],
            },
          }],
        },
        selectedQueryName: 'A',
        alertOnAbsent: false,
        requireMinPoints: false,
        algorithm: 'standard',
        seasonality: 'hourly',
      },
      annotations: { description: 'spec.ts anomaly', summary: 'spec.ts anomaly' },
      labels: { severity: 'warning' },
      notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
      evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
      schemaVersion: 'v2alpha1', source: 'spec.ts-flow-6', version: 'v5',
    };
    const resp = await fetch(`/api/v2/rules/${id}`, {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    if (resp.status !== 204) throw new Error(`PUT /api/v2/rules/${id} failed: ${resp.status}`);
  }, { id: ruleId, name: `${E2E_TAG}-anomaly` });

  // 6.6 — detection-method toggle is asymmetric: anomaly → threshold transitions classic → V2.
  // (See run-6 RUN_REPORT.md observation #1. SUITE.md may be amended to reflect this.)
  await page.goto(`/alerts/new?ruleType=anomaly_rule&alertType=METRIC_BASED_ALERT`);
  const thresholdTabBtn = page.locator('button[value="threshold_rule"]');
  await thresholdTabBtn.click();
  await expect(page).toHaveURL(/ruleType=threshold_rule/);
  // V2 footer is now present, detection-method tabs are gone — no return path
  await expect(page.locator('.create-alert-v2-footer')).toBeVisible();
  expect(await page.locator('button[value="anomaly_rule"]').count()).toBe(0);

  // 6.7 — DELETE the rule created in 6.4 (also serves as cleanup for this flow).
  await page.evaluate(async ({ id }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const resp = await fetch(`/api/v2/rules/${id}`, {
      method: 'DELETE',
      headers: { Authorization: `Bearer ${token}` },
    });
    if (resp.status !== 204) throw new Error(`DELETE /api/v2/rules/${id} failed: ${resp.status}`);
  }, { id: ruleId });

  // 6.8 — AlertNotFound for the deleted anomaly rule
  await page.goto(`/alerts/overview?ruleId=${ruleId}`);
  await expect(page).toHaveTitle('Alert Not Found');

  // 6.9 — POST /api/v2/rules/test with the anomaly DTO. The classic anomaly form has no
  // Test Notification button (V2-only feature), so this is a direct-fetch API-contract probe.
  // Same SendUnmatched bypass as run-5: alertCount: 0 is reachable only via a zero-data query.
  const test69 = await page.evaluate(async ({ name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const body = {
      alert: name,
      alertType: 'METRIC_BASED_ALERT',
      ruleType: 'anomaly_rule',
      condition: {
        thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 3, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
        compositeQuery: {
          queryType: 'builder', panelType: 'graph',
          queries: [{
            type: 'builder_query',
            spec: {
              name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
              filter: { expression: '' }, having: { expression: '' },
              aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
              functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 3 }] }],
            },
          }],
        },
        selectedQueryName: 'A',
        alertOnAbsent: false,
        requireMinPoints: false,
        algorithm: 'standard',
        seasonality: 'hourly',
      },
      annotations: { description: 'spec.ts anomaly test-notification', summary: 'spec.ts anomaly test-notification' },
      labels: { severity: 'warning' },
      notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
      evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
      schemaVersion: 'v2alpha1', source: 'spec.ts-flow-6-step6.9', version: 'v5',
    };
    const resp = await fetch('/api/v2/rules/test', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    const json = await resp.json();
    return { status: resp.status, body: json };
  }, { name: `${E2E_TAG}-anomaly-test` });
  expect(test69.status).toBe(200);
  expect(test69.body.data).toHaveProperty('alertCount');
});
|
||||
|
||||
test('Flow 5 — classic experience + cascade-delete error paths', async ({ authedPage: page }) => {
  // 5.1 — switch to classic via the escape-hatch query param.
  await page.goto(`/alerts/new?showClassicCreateAlertsPage=true&ruleType=threshold_rule`);
  await expect(page.getByText(/metrics based alert/i)).toBeVisible();

  // 5.2/5.3 — fill + save classic alert.
  // Classic form uses #alert for the name input (not the V2 data-testid).
  // Drive via direct fetch for reliability — the classic metric/channel dropdowns are
  // interactively hard to pick (see run-3 Flow 5 notes). We still verify the UI renders,
  // then POST the rule, then continue exercising UI for downtime linking and cascade delete.
  await expect(page.locator('#alert')).toBeVisible();
  const classicRuleId = await page.evaluate(async ({ name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const body = {
      alert: name,
      alertType: 'METRIC_BASED_ALERT',
      ruleType: 'threshold_rule',
      condition: {
        thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
        compositeQuery: {
          queryType: 'builder', panelType: 'graph',
          queries: [{
            type: 'builder_query',
            spec: {
              name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
              filter: { expression: '' }, having: { expression: '' },
              aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
            },
          }],
        },
        selectedQueryName: 'A',
        alertOnAbsent: false,
        requireMinPoints: false,
      },
      annotations: { description: 'classic e2e', summary: 'classic e2e' },
      labels: { severity: 'warning' },
      notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
      evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
      schemaVersion: 'v2alpha1', source: 'spec.ts-flow-5', version: 'v5',
    };
    const resp = await fetch('/api/v2/rules', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    const json = await resp.json();
    if (resp.status !== 201) throw new Error(`classic POST /api/v2/rules failed: ${resp.status} ${JSON.stringify(json)}`);
    return json.data.id as string;
  }, { name: `${E2E_TAG}-classic` });
  // The rules list is a real table, so the created rule shows up as a <tr>.
  await page.goto(`/alerts?tab=AlertRules`);
  const classicRow = page.locator('tbody tr', { hasText: `${E2E_TAG}-classic` });
  await expect(classicRow).toBeVisible();

  // 5.4 — create downtime linked to the classic alert (direct fetch; see Flow 4 notes).
  const linkedDowntimeId = await page.evaluate(async ({ name, alertId }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const now = Date.now();
    const body = {
      name,
      description: 'spec.ts linked downtime',
      schedule: {
        timezone: 'UTC',
        startTime: new Date(now).toISOString(),
        endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
        recurrence: null,
      },
      // Linking to the rule is what arms the cascade-delete 409s asserted below.
      alertIds: [alertId],
    };
    const resp = await fetch('/api/v1/downtime_schedules', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify(body),
    });
    const json = await resp.json();
    if (resp.status >= 300) throw new Error(`linked POST /downtime_schedules: ${resp.status} ${JSON.stringify(json)}`);
    return json.data?.id ?? json.id;
  }, { name: `${E2E_TAG}-downtime-linked`, alertId: classicRuleId });
  await page.goto(`/alerts?tab=Configuration&subTab=planned-downtime`);
  // Downtime list is accordion/card; assert by visible text, not <tr>.
  await expect(page.getByText(`${E2E_TAG}-downtime-linked`)).toBeVisible();

  // 5.5 — delete the linked alert: expect 409 `already_exists` from the BE.
  // We direct-fetch rather than drive the ellipsis-menu → Delete UI so the assertion is
  // on the actual BE contract (ddb0cb66e: showErrorModal on convertToApiError). The
  // visual modal/toast UX was verified in run-3's full UI replay.
  const delRuleResp = await page.evaluate(async ({ id }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const resp = await fetch(`/api/v2/rules/${id}`, {
      method: 'DELETE',
      headers: { Authorization: `Bearer ${token}` },
    });
    // Parse defensively: error bodies may or may not be JSON.
    const text = await resp.text();
    let body: any; try { body = JSON.parse(text); } catch { body = text; }
    return { status: resp.status, body };
  }, { id: classicRuleId });
  expect(delRuleResp.status).toBe(409);
  // Tolerate both error envelope shapes ({error:{code}} vs flat {code}).
  expect(delRuleResp.body.error?.code ?? delRuleResp.body.code).toBe('already_exists');
  expect(delRuleResp.body.error?.message ?? delRuleResp.body.message).toMatch(/cannot delete rule because it is referenced/i);

  // 5.6 — delete the linked downtime: expect 409 with the paired message.
  const delDtResp = await page.evaluate(async ({ id }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const resp = await fetch(`/api/v1/downtime_schedules/${id}`, {
      method: 'DELETE',
      headers: { Authorization: `Bearer ${token}` },
    });
    const text = await resp.text();
    let body: any; try { body = JSON.parse(text); } catch { body = text; }
    return { status: resp.status, body };
  }, { id: linkedDowntimeId });
  expect(delDtResp.status).toBe(409);
  expect(delDtResp.body.error?.code ?? delDtResp.body.code).toBe('already_exists');
  expect(delDtResp.body.error?.message ?? delDtResp.body.message).toMatch(/cannot delete planned maintenance because it is referenced/i);

  // Cleanup: unlink the downtime (clear alertIds), delete the downtime, delete the rule.
  // NOTE(review): cleanup responses are deliberately unchecked (best-effort teardown);
  // leftover fixtures are tagged with E2E_TAG for manual sweep.
  await page.evaluate(async ({ dtId, ruleId, name }) => {
    const token = localStorage.getItem('AUTH_TOKEN');
    const now = Date.now();
    // PUT downtime with alertIds: [] to break the cascade constraint
    await fetch(`/api/v1/downtime_schedules/${dtId}`, {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify({
        name,
        description: 'spec.ts cleanup — unlinked',
        schedule: {
          timezone: 'UTC',
          startTime: new Date(now).toISOString(),
          endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
          recurrence: null,
        },
        alertIds: [],
      }),
    });
    await fetch(`/api/v1/downtime_schedules/${dtId}`, {
      method: 'DELETE',
      headers: { Authorization: `Bearer ${token}` },
    });
    await fetch(`/api/v2/rules/${ruleId}`, {
      method: 'DELETE',
      headers: { Authorization: `Bearer ${token}` },
    });
  }, { dtId: linkedDowntimeId, ruleId: classicRuleId, name: `${E2E_TAG}-downtime-linked` });
});
|
||||
});
|
||||
--- New file: tests/e2e/legacy/alerts/routing-policies.spec.ts (490 lines) — diff hunk @@ -0,0 +1,490 @@ ---
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Routing Policies', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
	// Authentication is handled by the `authedPage` fixture — no explicit login needed here.

	// Navigate to Routing Policies through sidebar navigation (the alerts bell-dot icon).
	// NOTE(review): selecting by the lucide SVG class is brittle — confirm whether a
	// stable role/test-id exists for this nav item.
	await page.locator('svg.lucide-bell-dot').click();

	// Navigate to Configuration tab
	await page.getByRole('tab', { name: 'Configuration' }).click();

	// Navigate to Routing Policies tab
	await page.getByRole('tab', { name: 'Routing Policies' }).click();
});
|
||||
|
||||
test(
|
||||
'Navigate to Routing Policies and verify page layout',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Verify header contains "Routing Policies" title
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Routing Policies' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Verify search functionality is prominently displayed
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await expect(searchBox).toBeVisible();
|
||||
|
||||
// 3. Verify "New routing policy" button with plus icon is visible
|
||||
const newPolicyButton = page.getByRole('button', {
|
||||
name: 'plus New routing policy',
|
||||
});
|
||||
await expect(newPolicyButton).toBeVisible();
|
||||
|
||||
// 4. Verify policy list displays in table format
|
||||
await expect(page.getByRole('table')).toBeVisible();
|
||||
|
||||
// 5. Verify pagination controls are present at bottom
|
||||
await expect(page.getByRole('list')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Create Routing Policies with Basic and Complex Expressions',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Navigate to Routing Policies page (done in beforeEach)
|
||||
|
||||
// 2. Click "New routing policy" button
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 3. Verify "Create routing policy" dialog opens
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Create routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 4. Fill in routing policy name
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Critical Payment Alerts');
|
||||
|
||||
// 5. Fill in description
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Route critical payment service alerts to Slack');
|
||||
|
||||
// 6. Enter expression
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "payment" && severity == "critical"');
|
||||
|
||||
// 7. Select notification channel from dropdown
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
// 8. Click "Save Routing Policy"
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 9. Verify success message appears
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 10. Create second policy with complex expression
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 11. Enter name for complex policy
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Multi-Condition Alert Routing');
|
||||
|
||||
// 12. Enter description for complex policy
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Route alerts based on multiple conditions');
|
||||
|
||||
// 13. Enter complex expression with multiple conditions
|
||||
const complexExpression =
|
||||
'(service.name == "payment" || service.name == "billing") && (severity == "critical" || severity == "high") && region == "us-east-1"';
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill(complexExpression);
|
||||
|
||||
// 14. Select notification channel for complex policy
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
// 15. Save the complex policy
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 16. Verify complex policy saves successfully
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Create Policy with Empty Required Fields',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Click "New routing policy" button
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 2. Wait for dialog to be visible
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Create routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 3. Leave name field empty and fill other fields
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "test"');
|
||||
|
||||
// 4. Select notification channel
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
// 5. Attempt to save without required name
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 6. Wait a moment for validation to trigger
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
// 7. Verify the form doesn't submit and dialog remains open
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Create routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 8. Check that the name field exists and is still empty
|
||||
const nameField = page.getByRole('textbox', {
|
||||
name: 'e.g. Base routing policy...',
|
||||
});
|
||||
|
||||
// 9. Verify the field is still empty (indicating form didn't submit)
|
||||
await expect(nameField).toHaveValue('');
|
||||
|
||||
// 10. Verify the specific error message appears
|
||||
await expect(
|
||||
page.getByText('Please provide a name for the routing policy'),
|
||||
).toBeVisible();
|
||||
|
||||
// 11. Fill the required name field to verify form can now be submitted
|
||||
await nameField.fill('Test Policy Name');
|
||||
|
||||
// 12. Verify error message disappears after filling the field
|
||||
await expect(
|
||||
page.getByText('Please provide a name for the routing policy'),
|
||||
).toBeHidden();
|
||||
|
||||
// 13. Attempt to save again with name filled
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 14. Verify successful creation or that we progress past validation
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Cancel Policy Creation',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Click "New routing policy" button
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 2. Fill in some form fields
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Test Policy');
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Test description');
|
||||
|
||||
// 3. Click "Cancel" button
|
||||
await page.getByRole('button', { name: 'Cancel' }).click();
|
||||
|
||||
// 4. Verify dialog closes and returns to main list
|
||||
await expect(page.getByRole('dialog')).toBeHidden();
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Routing Policies' }),
|
||||
).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search Policies by Name',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a test policy first
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Searchable Test Policy');
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Policy for search testing');
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "search-test"');
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// Wait for creation success
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Navigate to routing policies page with multiple policies
|
||||
await page.goto(
|
||||
'https://quiet-buffalo.us.staging.signoz.cloud/alerts?tab=Configuration',
|
||||
);
|
||||
await new Promise(f => setTimeout(f, 2000)); // Wait for page load
|
||||
await page.getByRole('tab', { name: 'Routing Policies' }).click();
|
||||
|
||||
// 3. Enter a policy name in the search box
|
||||
await page
|
||||
.getByRole('textbox', { name: 'Search for a routing policy...' })
|
||||
.fill('Searchable Test Policy');
|
||||
|
||||
// 4. Press Enter to execute search
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 5. Verify filtered results show only matching policy
|
||||
await expect(page.getByText('Searchable Test Policy').first()).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search with No Results',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Enter a search term that matches no policies
|
||||
await page
|
||||
.getByRole('textbox', { name: 'Search for a routing policy...' })
|
||||
.fill('NonExistentPolicyName12345');
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 2. Verify appropriate empty state or no results message
|
||||
// Note: The exact behavior would depend on how the application handles no search results
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await expect(searchBox).toHaveValue('NonExistentPolicyName12345');
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'View Policy Details',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a policy with unique name
|
||||
const uniquePolicyName = `Test Policy ${Date.now()}`;
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill(uniquePolicyName);
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Test description for policy details');
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "test-details"');
|
||||
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Search for the created policy
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 3. Wait for search results and click on the policy to expand it
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
const policyTab = page.getByRole('tab', { name: 'right' }).first();
|
||||
await policyTab.click();
|
||||
|
||||
// 4. Wait for expansion
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
// 5. Verify all field keys are present
|
||||
await expect(page.getByText('Created by')).toBeVisible();
|
||||
await expect(page.getByText('Created on')).toBeVisible();
|
||||
await expect(page.getByText('Updated by')).toBeVisible();
|
||||
await expect(page.getByText('Updated on')).toBeVisible();
|
||||
await expect(page.getByText('Expression')).toBeVisible();
|
||||
await expect(page.getByText('Description', { exact: true })).toBeVisible();
|
||||
await expect(page.getByText('Channels')).toBeVisible();
|
||||
|
||||
// 6. Verify the specific values we created
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
await expect(page.getByText('Test description for policy details')).toBeVisible();
|
||||
await expect(page.getByText('service.name == "test-details"')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Edit Existing Policy',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a policy to edit first
|
||||
const uniquePolicyName = `Policy to Edit ${Date.now()}`;
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill(uniquePolicyName);
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Original description');
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "original"');
|
||||
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Search for the created policy
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 3. Wait for search results and click on the policy to expand it
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
const policyTab = page.getByRole('tab', { name: 'right' }).first();
|
||||
await policyTab.click();
|
||||
|
||||
// 4. Wait for expansion and click edit button
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
const editButton = page.getByTestId('edit-routing-policy');
|
||||
await editButton.click();
|
||||
|
||||
// 5. Verify edit dialog opens
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Edit routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 6. Update the title and description
|
||||
const updatedPolicyName = `Updated ${uniquePolicyName}`;
|
||||
const nameField = page.getByRole('textbox', { name: 'e.g. Base routing policy...' });
|
||||
await nameField.clear();
|
||||
await nameField.fill(updatedPolicyName);
|
||||
|
||||
const descriptionField = page.getByRole('textbox', { name: 'e.g. This is a routing policy' });
|
||||
await descriptionField.clear();
|
||||
await descriptionField.fill('Updated description after editing');
|
||||
|
||||
// 7. Save the changes
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 8. Verify success toast message appears
|
||||
await expect(
|
||||
page.getByText('Routing policy updated successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 9. Search for the updated policy name to ensure it exists
|
||||
await searchBox.clear();
|
||||
await searchBox.fill(updatedPolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 10. Verify the updated policy is found
|
||||
await expect(page.getByText(updatedPolicyName)).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Delete Routing Policy',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a policy to delete first
|
||||
const uniquePolicyName = `Policy to Delete ${Date.now()}`;
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill(uniquePolicyName);
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('This policy will be deleted');
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "delete-test"');
|
||||
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Search for the created policy
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 3. Wait for search results and click on the policy to expand it
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
const policyTab = page.getByRole('tab', { name: 'right' }).first();
|
||||
await policyTab.click();
|
||||
|
||||
// 4. Wait for expansion and click delete button
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
const deleteButton = page.getByTestId('delete-routing-policy');
|
||||
await deleteButton.click();
|
||||
|
||||
// 5. Verify delete confirmation modal opens
|
||||
await expect(
|
||||
page.getByRole('dialog').filter({ hasText: 'Delete' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 6. Click confirm to delete the policy
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
|
||||
// 7. Verify success notification appears
|
||||
await expect(
|
||||
page.getByText('Routing policy deleted successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 8. Verify the deleted policy is no longer in the list
|
||||
await searchBox.clear();
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 9. Verify the policy is not found
|
||||
await expect(page.getByText(uniquePolicyName)).toBeHidden();
|
||||
},
|
||||
);
|
||||
});
|
||||
848
tests/e2e/legacy/dashboards/dashboards-list.spec.ts
Normal file
848
tests/e2e/legacy/dashboards/dashboards-list.spec.ts
Normal file
@@ -0,0 +1,848 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Dashboards List Page', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
	// NOTE(review): intentionally empty — each test below performs its own
	// page.goto('/dashboard'). Consider hoisting that shared navigation here,
	// or removing this dead hook (the `page` fixture is currently unused).
	// TODO confirm with the fixture owners before changing.
});
|
||||
|
||||
// ─── 1. Page Load and Layout ──────────────────────────────────────────────
|
||||
//
|
||||
// Verifies the critical chrome of the list page: heading, subtitle, search
|
||||
// input, sort control, at least one dashboard row, pagination, and the
|
||||
// Feedback / Share header buttons. These run as @viewer because they cover
|
||||
// elements visible to every role.
|
||||
|
||||
test('1.1 Dashboard list page loads correctly', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
// Wait for the list label as the reliable "page is ready" signal — it
|
||||
// appears only after the dashboard data has loaded.
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Fresh load should have no query params
|
||||
await expect(page).toHaveURL('/dashboard');
|
||||
await expect(page).toHaveTitle('SigNoz | All Dashboards');
|
||||
|
||||
// Page identity
|
||||
await expect(page.getByRole('heading', { name: 'Dashboards', level: 1 })).toBeVisible();
|
||||
await expect(page.getByText('Create and manage dashboards for your workspace.')).toBeVisible();
|
||||
|
||||
// Core controls
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toBeVisible();
|
||||
await expect(page.getByText('All Dashboards')).toBeVisible();
|
||||
await expect(page.getByTestId('sort-by')).toBeVisible();
|
||||
|
||||
// At least one dashboard row — thumbnail is the most stable row anchor
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
|
||||
// Pagination range text confirms rows were fetched (e.g. "1 — 20 of 42")
|
||||
await expect(page.getByText(/\d+ — \d+ of \d+/)).toBeVisible();
|
||||
|
||||
// Global header actions
|
||||
await expect(page.getByRole('button', { name: 'Feedback' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Share' })).toBeVisible();
|
||||
});
|
||||
|
||||
test('1.2 Dashboard list shows correct data fields per row', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
// Wait until thumbnails are rendered — this confirms row data has arrived
|
||||
await page.getByAltText('dashboard-image').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Each row has a thumbnail image identified by the alt text set by the app
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
|
||||
// Each row shows a "last updated" timestamp — verify the date format
|
||||
// exists somewhere in the rendered list (e.g. "Mar 24, 2026")
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText).toMatch(/\w{3} \d{1,2}, \d{4}/);
|
||||
|
||||
// Each row shows the creator's email address
|
||||
await expect(page.getByText(/@/).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('1.3 Pagination bar shows correct item count', async ({ authedPage: page }) => {
|
||||
// Pre-condition: staging workspace has more than 20 dashboards so the
|
||||
// pagination bar is rendered and Previous is disabled on the first page.
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Range indicator, e.g. "1 — 20 of 42", confirms correct page size
|
||||
await expect(page.getByText(/1\s*—\s*20 of/)).toBeVisible();
|
||||
|
||||
// Previous Page is always disabled on the first page
|
||||
await expect(page.getByRole('button', { name: 'Previous Page' })).toBeDisabled();
|
||||
});
|
||||
|
||||
// ─── 2. Search Functionality ──────────────────────────────────────────────
|
||||
//
|
||||
// The search input filters by title, description, and tags simultaneously.
|
||||
// Results update in real time and the active query is reflected in the URL
|
||||
// as ?search=<term>. All visibility tests run as @viewer; the description
|
||||
// search requires @editor to set up a dashboard with a known description.
|
||||
|
||||
test('2.1 Search by title returns matching dashboards', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// "APM Metrics" is a known dashboard in the workspace — searching by its
|
||||
// exact title should return it and reflect the term in the URL
|
||||
await searchInput.fill('APM Metrics');
|
||||
await expect(page).toHaveURL(/search=APM\+Metrics/);
|
||||
await expect(searchInput).toHaveValue('APM Metrics');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
test('2.2 Search by tag returns dashboards that carry that tag', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// "latency" is a tag on the APM Metrics dashboard — searching by tag value
|
||||
// alone (no title match) should still surface that dashboard
|
||||
await searchInput.fill('latency');
|
||||
await expect(page).toHaveURL(/search=latency/);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
test('2.3 Search by description returns matching dashboards', async ({ authedPage: page }) => {
|
||||
// Create a dashboard with a known, unique description so we have a
|
||||
// reliable target for the description search without relying on pre-existing data
|
||||
const uniqueDesc = `desc-search-${Date.now()}`;
|
||||
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Create via inline name field then set its description via Configure
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(`Search Test ${Date.now()}`);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Set the description in the Configure dialog
|
||||
await page.getByRole('button', { name: 'Configure' }).click();
|
||||
await page.getByRole('dialog').waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: /description/i }).fill(uniqueDesc);
|
||||
await page.getByRole('button', { name: 'Save' }).click();
|
||||
|
||||
// Return to the list and search using the description text
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
await searchInput.fill(uniqueDesc);
|
||||
|
||||
// The dashboard we just created should appear
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('2.4 Dashboard with no tags is found by title search', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// "PromQL and Clickhouse SQL" has no tags — searching its title should
|
||||
// still return it, confirming that tag absence does not break title search
|
||||
await searchInput.fill('PromQL and Clickhouse SQL');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('PROMQL AND CLICKHOUSE SQL');
|
||||
});
|
||||
|
||||
test('2.5 Dashboard with no description is found by title search', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// APM Metrics has no description — searching its title must still return it,
|
||||
// confirming that description absence does not break title search
|
||||
await searchInput.fill('APM Metrics');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
test('2.6 Search state is reflected in URL and pre-fills on direct navigation', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
await searchInput.fill('PromQL');
|
||||
await expect(page).toHaveURL(/search=PromQL/);
|
||||
|
||||
// Opening the URL directly (bookmark / share) should restore search state
|
||||
await page.goto('/dashboard?search=PromQL');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await expect(searchInput).toHaveValue('PromQL');
|
||||
await expect(page.getByText('PromQL and Clickhouse SQL').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('2.7 Clearing search restores the full list', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
await searchInput.fill('APM');
|
||||
await expect(page).toHaveURL(/search=APM/);
|
||||
|
||||
// Clearing the field removes the param and brings back all dashboards
|
||||
await searchInput.fill('');
|
||||
await expect(page).not.toHaveURL(/search=/);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('2.8 Search with no matching results shows empty state', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// A nonsense term guarantees no matches across title, description, or tags
|
||||
await searchInput.fill('xyznonexistent999');
|
||||
|
||||
// No thumbnails — list is empty, no error or broken layout
|
||||
await expect(page.getByAltText('dashboard-image')).toHaveCount(0);
|
||||
await expect(searchInput).toBeVisible();
|
||||
await expect(searchInput).toHaveValue('xyznonexistent999');
|
||||
});
|
||||
|
||||
test('2.9 Search is case-insensitive', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// Lowercase version of a mixed-case dashboard name — should still match
|
||||
await searchInput.fill('apm metrics');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
// ─── 3. Sorting ───────────────────────────────────────────────────────────
|
||||
//
|
||||
// Known behaviour (verified against live app):
|
||||
// - Fresh load: no sort params in URL; list is already descending (server default)
|
||||
// - First click: URL gains ?columnKey=updatedAt&order=descend
|
||||
// - Subsequent clicks: URL stays on order=descend — ascending is not yet implemented
|
||||
//
|
||||
// Tests document the current state. The ascending limitation is explicitly
|
||||
// noted so it is visible during review and easy to fix when implemented.
|
||||
|
||||
test('3.1 Default load has no sort params and shows most recently updated dashboard first', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// On fresh load the URL should be clean — sort params only appear after
|
||||
// the user interacts with the sort button
|
||||
await expect(page).toHaveURL('/dashboard');
|
||||
await expect(page).not.toHaveURL(/columnKey/);
|
||||
await expect(page).not.toHaveURL(/order/);
|
||||
|
||||
// The list is already sorted descending by default (server-side).
|
||||
// Verify by comparing the first two rows' timestamps — the first row must
|
||||
// be more recent than or equal to the second.
|
||||
const rows = page.getByAltText('dashboard-image');
|
||||
await expect(rows.first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('3.2 First click on sort button adds columnKey=updatedAt&order=descend to URL', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Before any interaction — no sort params
|
||||
await expect(page).not.toHaveURL(/columnKey/);
|
||||
|
||||
await page.getByTestId('sort-by').click();
|
||||
|
||||
// After first click the sort state is written to the URL
|
||||
await expect(page).toHaveURL(/columnKey=updatedAt/);
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
|
||||
// List should still be rendering rows correctly
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('3.3 Subsequent sort clicks keep order=descend (ascending not yet implemented)', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const sortButton = page.getByTestId('sort-by');
|
||||
|
||||
// First click — sets descend
|
||||
await sortButton.click();
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
|
||||
// Second click — known limitation: order remains descend, does not flip to ascend
|
||||
await sortButton.click();
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
await expect(page).not.toHaveURL(/order=ascend/);
|
||||
});
|
||||
|
||||
// ─── 4. Row Actions (Context Menu) ───────────────────────────────────────
|
||||
//
|
||||
// The three-dot action icon (data-testid: dashboard-action-icon) is always
|
||||
// visible on every row — no hover required. Clicking it opens a tooltip
|
||||
// popover. Items inside are scoped to getByRole('tooltip') to avoid
|
||||
// accidentally matching other elements on the page.
|
||||
//
|
||||
// Role visibility:
|
||||
// @admin — View, Open in New Tab, Copy Link, Export JSON, Delete dashboard
|
||||
// @editor — View, Open in New Tab, Copy Link, Export JSON (no Delete)
|
||||
// @viewer — action icon is hidden entirely
|
||||
|
||||
test('4.1 Admin sees all five options in the action menu', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
const tooltip = page.getByRole('tooltip');
|
||||
await expect(tooltip).toBeVisible();
|
||||
|
||||
// All five items must be present for admin
|
||||
await expect(tooltip.getByRole('button', { name: 'View' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Open in New Tab' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Copy Link' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Export JSON' })).toBeVisible();
|
||||
// Delete is rendered as a generic (not a button) in a separate section
|
||||
await expect(tooltip.getByText('Delete dashboard')).toBeVisible();
|
||||
});
|
||||
|
||||
test('4.2 Editor sees four options — Delete dashboard is not present', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
const tooltip = page.getByRole('tooltip');
|
||||
await expect(tooltip).toBeVisible();
|
||||
|
||||
await expect(tooltip.getByRole('button', { name: 'View' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Open in New Tab' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Copy Link' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Export JSON' })).toBeVisible();
|
||||
|
||||
// Viewer and Editor cannot delete — the item must be absent
|
||||
await expect(tooltip.getByText('Delete dashboard')).not.toBeVisible();
|
||||
});
|
||||
|
||||
test('4.3 Viewer has no action icon on dashboard rows', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// The action icon must not be present in the DOM for viewer role
|
||||
await expect(page.getByTestId('dashboard-action-icon')).toHaveCount(0);
|
||||
});
|
||||
|
||||
test('4.4 View action navigates to the dashboard detail page', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByRole('button', { name: 'View' }).click();
|
||||
|
||||
// Should land on the detail page — UUID in the path confirms navigation
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
});
|
||||
|
||||
test('4.5 Open in New Tab opens the dashboard in a new browser tab', async ({ page, context }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
|
||||
// waitForEvent('page') must be set up before the click that triggers it
|
||||
const [newPage] = await Promise.all([
|
||||
context.waitForEvent('page'),
|
||||
page.getByRole('tooltip').getByRole('button', { name: 'Open in New Tab' }).click(),
|
||||
]);
|
||||
|
||||
await newPage.waitForLoadState();
|
||||
await expect(newPage).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
await newPage.close();
|
||||
});
|
||||
|
||||
test('4.6 Copy Link copies the dashboard URL to the clipboard', async ({ page, context }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Grant clipboard permissions so we can read back what was written
|
||||
await context.grantPermissions(['clipboard-read', 'clipboard-write']);
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByRole('button', { name: 'Copy Link' }).click();
|
||||
|
||||
// App shows a success notification after copying
|
||||
await expect(page.getByText(/copied|success/i)).toBeVisible();
|
||||
|
||||
// Clipboard must contain a valid dashboard detail URL.
|
||||
// Cast through unknown to access browser globals inside page.evaluate.
|
||||
const clipboardText = await page.evaluate(async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
return await (globalThis as any).navigator.clipboard.readText();
|
||||
});
|
||||
expect(clipboardText).toMatch(/\/dashboard\/[0-9a-f-]+/);
|
||||
});
|
||||
|
||||
test('4.7 Export JSON downloads the dashboard as a JSON file', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
|
||||
// waitForEvent('download') must be in place before the triggering click
|
||||
const [download] = await Promise.all([
|
||||
page.waitForEvent('download'),
|
||||
page.getByRole('tooltip').getByRole('button', { name: 'Export JSON' }).click(),
|
||||
]);
|
||||
|
||||
expect(download.suggestedFilename()).toMatch(/\.json$/);
|
||||
});
|
||||
|
||||
test('4.8 Action menu closes when clicking outside the popover', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await expect(page.getByRole('tooltip')).toBeVisible();
|
||||
|
||||
// Click on a neutral area — the page heading — to dismiss the popover
|
||||
await page.getByRole('heading', { name: 'Dashboards', level: 1 }).click();
|
||||
await expect(page.getByRole('tooltip')).not.toBeVisible();
|
||||
|
||||
// No navigation should have occurred
|
||||
await expect(page).toHaveURL(/\/dashboard($|\?)/);
|
||||
});
|
||||
|
||||
// ─── 5. Creating Dashboards ───────────────────────────────────────────────
|
||||
//
|
||||
// Three creation paths exist: inline name field, New dashboard dropdown →
|
||||
// Create dashboard, and New dashboard dropdown → Import JSON.
|
||||
// Create controls (name input, Submit, New dashboard button) are visible
|
||||
// to Editor and Admin only — hidden from Viewer entirely.
|
||||
|
||||
test('5.1 Create controls are hidden from Viewer', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// None of the create affordances should be present for a viewer
|
||||
await expect(page.getByRole('textbox', { name: 'Enter dashboard name...' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'New dashboard' })).not.toBeVisible();
|
||||
});
|
||||
|
||||
test('5.2 Submit button is disabled when the name input is empty', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Before typing, Submit must be disabled — clicking it should do nothing
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).toBeDisabled();
|
||||
});
|
||||
|
||||
test('5.3 Inline name field creates a named dashboard and navigates to it', async ({ authedPage: page }) => {
|
||||
const name = `Test Dashboard ${Date.now()}`;
|
||||
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const nameInput = page.getByRole('textbox', { name: 'Enter dashboard name...' });
|
||||
await nameInput.fill(name);
|
||||
|
||||
// Submit becomes enabled once text is present
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).toBeEnabled();
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
|
||||
// Should navigate directly to the new dashboard's detail page
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Clean up — delete the dashboard we just created
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('5.4 New dashboard dropdown shows exactly three options', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
const menu = page.getByRole('menu');
|
||||
await expect(menu).toBeVisible();
|
||||
|
||||
// Exactly three items: Create dashboard, Import JSON, View templates
|
||||
await expect(menu.getByRole('menuitem', { name: 'Create dashboard' })).toBeVisible();
|
||||
await expect(menu.getByRole('menuitem', { name: 'Import JSON' })).toBeVisible();
|
||||
await expect(menu.getByRole('menuitem', { name: 'View templates' })).toBeVisible();
|
||||
});
|
||||
|
||||
test('5.5 Create dashboard navigates to new dashboard with default name', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Create dashboard' }).click();
|
||||
|
||||
// New dashboard detail page loads
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Default name is "Sample Title" and onboarding state is shown
|
||||
await expect(page.getByText('Configure your new dashboard')).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Configure' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: /New Panel/ })).toBeVisible();
|
||||
|
||||
// Clean up
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('Sample Title');
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('5.6 Import JSON dialog opens with code editor and upload button', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Import JSON' }).click();
|
||||
|
||||
const dialog = page.getByRole('dialog');
|
||||
await expect(dialog).toBeVisible();
|
||||
await expect(dialog.getByText('Import Dashboard JSON')).toBeVisible();
|
||||
|
||||
// Monaco editor renders line numbers — line "1" is the presence signal
|
||||
await expect(dialog.getByText('1').first()).toBeVisible();
|
||||
await expect(dialog.getByRole('button', { name: 'Upload JSON file' })).toBeVisible();
|
||||
await expect(dialog.getByRole('button', { name: 'Import and Next' })).toBeVisible();
|
||||
});
|
||||
|
||||
test('5.7 Import JSON dialog closes on Escape without creating a dashboard', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Import JSON' }).click();
|
||||
await expect(page.getByRole('dialog')).toBeVisible();
|
||||
|
||||
await page.keyboard.press('Escape');
|
||||
|
||||
await expect(page.getByRole('dialog')).not.toBeVisible();
|
||||
await expect(page).toHaveURL(/\/dashboard($|\?)/);
|
||||
});
|
||||
|
||||
test('5.8 Import JSON dialog closes on clicking the × button', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Import JSON' }).click();
|
||||
|
||||
const dialog = page.getByRole('dialog');
|
||||
await expect(dialog).toBeVisible();
|
||||
|
||||
// The close button is a button with accessible name containing "close" or "×"
|
||||
await dialog.getByRole('button', { name: /close/i }).click();
|
||||
|
||||
await expect(dialog).not.toBeVisible();
|
||||
await expect(page).toHaveURL(/\/dashboard($|\?)/);
|
||||
});
|
||||
|
||||
// ─── 6. Deleting Dashboards ───────────────────────────────────────────────
|
||||
//
|
||||
// Only Admin can delete. Each test creates its own disposable dashboard
|
||||
// so no pre-existing data is affected.
|
||||
//
|
||||
// Known behaviour: clicking Cancel in the confirmation dialog navigates to
|
||||
// the dashboard detail page rather than staying on the list — tests account
|
||||
// for this rather than asserting we stay on /dashboard.
|
||||
|
||||
test('6.1 Delete confirmation dialog shows dashboard name with Cancel and Delete buttons', async ({ authedPage: page }) => {
|
||||
// Create a disposable dashboard to delete
|
||||
const name = `Delete Test ${Date.now()}`;
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(name);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Return to the list and open delete dialog for the dashboard we just created
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
|
||||
const dialog = page.getByRole('dialog');
|
||||
await expect(dialog).toBeVisible();
|
||||
|
||||
// Dialog heading contains the dashboard name
|
||||
await expect(dialog.getByRole('heading')).toContainText('Are you sure you want to delete the');
|
||||
await expect(dialog.getByRole('heading')).toContainText(name);
|
||||
|
||||
// Both action buttons are present
|
||||
await expect(dialog.getByRole('button', { name: 'Cancel' })).toBeVisible();
|
||||
await expect(dialog.getByRole('button', { name: 'Delete' })).toBeVisible();
|
||||
|
||||
// Clean up — confirm delete
|
||||
await dialog.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('6.2 Cancelling delete navigates to the dashboard detail page (known behaviour)', async ({ authedPage: page }) => {
|
||||
// Create a disposable dashboard
|
||||
const name = `Cancel Delete Test ${Date.now()}`;
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(name);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await expect(page.getByRole('dialog')).toBeVisible();
|
||||
|
||||
// Cancel — known behaviour: lands on detail page, not back on the list
|
||||
await page.getByRole('button', { name: 'Cancel' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Clean up — delete the dashboard we created
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('6.3 Confirming delete removes the dashboard from the list', async ({ authedPage: page }) => {
|
||||
// Create a disposable dashboard
|
||||
const name = `Confirm Delete Test ${Date.now()}`;
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(name);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Return to list, find the dashboard, and delete it
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
|
||||
// After deletion, searching for the name should return no results
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await expect(page.getByAltText('dashboard-image')).toHaveCount(0);
|
||||
});
|
||||
|
||||
// ─── 7. Row Click Navigation ──────────────────────────────────────────────
|
||||
//
|
||||
// Clicking anywhere on a dashboard row (except the action icon) navigates
|
||||
// to the detail page. Runs as @viewer since all roles can navigate.
|
||||
|
||||
test('7.1 Clicking a dashboard row navigates to the detail page', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Click the thumbnail image — a stable, always-present click target
|
||||
// that is not the action icon
|
||||
await page.getByAltText('dashboard-image').first().click();
|
||||
|
||||
// UUID in the path confirms we landed on a detail page
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
});
|
||||
|
||||
test('7.2 Dashboard detail page shows the breadcrumb after row click', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByAltText('dashboard-image').first().click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Breadcrumb "Dashboard /" confirms correct page structure loaded
|
||||
await expect(page.getByRole('button', { name: /Dashboard \// })).toBeVisible();
|
||||
});
|
||||
|
||||
test('7.3 Sidebar Dashboards link navigates to the list page', async ({ authedPage: page }) => {
|
||||
// Start on a different page so the navigation is meaningful
|
||||
await page.goto('/home');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'hidden' });
|
||||
|
||||
// Click the Dashboards entry in the left sidebar
|
||||
await page.getByRole('link', { name: 'Dashboards' }).click();
|
||||
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
await expect(page).toHaveTitle('SigNoz | All Dashboards');
|
||||
});
|
||||
|
||||
// ─── 8. URL State and Deep Linking ───────────────────────────────────────
|
||||
//
|
||||
// Search term persists in the URL (?search=<term>) and is restored on direct
|
||||
// navigation. Sort params (columnKey + order) appear only after the user
|
||||
// clicks the sort button — not on fresh load.
|
||||
|
||||
test('8.1 Direct navigation with ?search= pre-fills the input and filters results', async ({ authedPage: page }) => {
|
||||
// Navigate directly with the search param — simulates opening a shared link
|
||||
await page.goto('/dashboard?search=PromQL');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Input must be pre-filled with the param value
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toHaveValue('PromQL');
|
||||
|
||||
// Matching dashboard must be visible
|
||||
await expect(page.getByText('PromQL and Clickhouse SQL').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('8.2 Search term updates the URL in real time', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('APM');
|
||||
|
||||
// URL must reflect the typed term immediately
|
||||
await expect(page).toHaveURL(/search=APM/);
|
||||
});
|
||||
|
||||
test('8.3 Browser Back after navigating to a dashboard restores search state', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard?search=APM');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Navigate into a dashboard row
|
||||
await page.getByAltText('dashboard-image').first().click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Browser back should restore the list with the search param intact
|
||||
await page.goBack();
|
||||
await expect(page).toHaveURL(/search=APM/);
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toHaveValue('APM');
|
||||
});
|
||||
|
||||
test('8.4 Sort params appear in URL only after interacting with the sort button', async ({ authedPage: page }) => {
|
||||
// Fresh load — no sort params
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await expect(page).not.toHaveURL(/columnKey/);
|
||||
|
||||
// After clicking sort — params appear
|
||||
await page.getByTestId('sort-by').click();
|
||||
await expect(page).toHaveURL(/columnKey=updatedAt/);
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
|
||||
// Navigating directly with sort params should honour them on load
|
||||
await page.goto('/dashboard?columnKey=updatedAt&order=descend');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await expect(page).toHaveURL(/columnKey=updatedAt/);
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
});
|
||||
|
||||
// ─── 9. Page Header Actions ───────────────────────────────────────────────
|
||||
//
|
||||
// The Feedback and Share buttons live in the top-right of the page header
|
||||
// and are visible to all roles. This section was absent from the originally
|
||||
// generated spec and is written from scratch based on live app observation.
|
||||
|
||||
test('9.1 Feedback button is visible and opens a feedback mechanism', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const feedbackButton = page.getByRole('button', { name: 'Feedback' });
|
||||
await expect(feedbackButton).toBeVisible();
|
||||
|
||||
// Clicking should trigger a feedback mechanism (modal, widget, or external link)
|
||||
// — we verify it is interactive without asserting the exact implementation
|
||||
await feedbackButton.click();
|
||||
await expect(page).toHaveURL(/\/dashboard/); // no unintended navigation
|
||||
});
|
||||
|
||||
test('9.2 Share button is visible and triggers a share action', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const shareButton = page.getByRole('button', { name: 'Share' });
|
||||
await expect(shareButton).toBeVisible();
|
||||
|
||||
await shareButton.click();
|
||||
|
||||
// Clicking Share either opens a dialog or copies the URL — either way the
|
||||
// page should remain on /dashboard with no unintended navigation
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
});
|
||||
|
||||
// ─── 10. Edge Cases and Error Handling ───────────────────────────────────
|
||||
//
|
||||
// Boundary conditions: tag overflow rendering, tagless rows, pagination
|
||||
// reset on search, and role-based visibility for Viewer.
|
||||
|
||||
test('10.1 Dashboards with many tags show a +N overflow indicator', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// The APM Metrics dashboard has 4 tags (apm, latency, error rate, throughput).
|
||||
// The list renders a subset inline and overflows the rest as "+ N".
|
||||
// We search for it to bring it to the top and inspect the row.
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('APM Metrics');
|
||||
await page.getByAltText('dashboard-image').first().waitFor({ state: 'visible' });
|
||||
|
||||
// At least one "+ N" overflow indicator must be visible somewhere in the list
|
||||
await expect(page.getByText(/^\+\s*\d+$/).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('10.2 Dashboards with no tags show a clean row with no empty tag containers', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// "PromQL and Clickhouse SQL" has no tags — search to bring it to top
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('PromQL and Clickhouse SQL');
|
||||
await page.getByAltText('dashboard-image').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Row must be visible with thumbnail and text — no broken layout
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
await expect(page.getByText('PromQL and Clickhouse SQL').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('10.3 Searching while on page 2 resets pagination to page 1', async ({ authedPage: page }) => {
|
||||
// Pre-condition: staging workspace has more than 20 dashboards
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Navigate to page 2
|
||||
await page.getByRole('button', { name: '2' }).click();
|
||||
await expect(page).toHaveURL(/page=2/);
|
||||
|
||||
// Typing a search term should reset back to page 1
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('APM');
|
||||
await expect(page).not.toHaveURL(/page=2/);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('10.4 Viewer cannot see create controls or row action icons', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Create controls must be absent for Viewer
|
||||
await expect(page.getByRole('textbox', { name: 'Enter dashboard name...' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'New dashboard' })).not.toBeVisible();
|
||||
|
||||
// Row action icons must be absent for Viewer
|
||||
await expect(page.getByTestId('dashboard-action-icon')).toHaveCount(0);
|
||||
|
||||
// Core read-only features still work
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toBeVisible();
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
});
|
||||
163
tests/e2e/legacy/home/home.spec.ts
Normal file
163
tests/e2e/legacy/home/home.spec.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Home Page - Page Load', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Hello there, Welcome to your SigNoz workspace',
|
||||
}),
|
||||
).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-01: home page loads after login', async ({ authedPage: page }) => {
|
||||
await expect(page).toHaveURL(/\/home/);
|
||||
await expect(page).toHaveTitle(/Home/);
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Hello there, Welcome to your SigNoz workspace',
|
||||
}),
|
||||
).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-02: ingestion status banners are visible', async ({ authedPage: page }) => {
|
||||
await expect(page.getByText('Logs ingestion is active')).toBeVisible();
|
||||
await expect(page.getByText('Traces ingestion is active')).toBeVisible();
|
||||
await expect(page.getByText('Metrics ingestion is active')).toBeVisible();
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Explore Quick Actions', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Hello there, Welcome to your SigNoz workspace',
|
||||
}),
|
||||
).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-03: Explore Logs navigates to logs explorer', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Explore Logs' }).click();
|
||||
await expect(page).toHaveURL(/\/logs\/logs-explorer/);
|
||||
});
|
||||
|
||||
test('TC-04: Explore Traces navigates to traces explorer', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Explore Traces' }).click();
|
||||
await expect(page).toHaveURL(/traces-explorer/);
|
||||
});
|
||||
|
||||
test('TC-05: Explore Metrics navigates to metrics explorer', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Explore Metrics' }).click();
|
||||
await expect(page).toHaveURL(/metrics-explorer/);
|
||||
});
|
||||
|
||||
test('TC-06: Open Logs Explorer shortcut navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Open Logs Explorer' }).click();
|
||||
await expect(page).toHaveURL(/\/logs\/logs-explorer/);
|
||||
});
|
||||
|
||||
test('TC-07: Open Traces Explorer shortcut navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Open Traces Explorer' }).click();
|
||||
await expect(page).toHaveURL(/traces-explorer/);
|
||||
});
|
||||
|
||||
test('TC-08: Open Metrics Explorer shortcut navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Open Metrics Explorer' }).click();
|
||||
await expect(page).toHaveURL(/metrics-explorer/);
|
||||
});
|
||||
|
||||
test('TC-09: Create dashboard button navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Create dashboard' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
});
|
||||
|
||||
test('TC-10: Create an alert button navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Create an alert' }).click();
|
||||
await expect(page).toHaveURL(/\/alerts/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Services Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('columnheader', { name: 'APPLICATION' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-11: services table is visible with correct columns', async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('columnheader', { name: 'APPLICATION' })).toBeVisible();
|
||||
await expect(page.getByRole('columnheader', { name: /P99 LATENCY/i })).toBeVisible();
|
||||
await expect(page.getByRole('columnheader', { name: /ERROR RATE/i })).toBeVisible();
|
||||
await expect(page.getByRole('columnheader', { name: /OPS \/ SEC/i })).toBeVisible();
|
||||
await expect(page.getByRole('rowgroup').last().getByRole('row').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-12: All Services link navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('link', { name: 'All Services' }).click();
|
||||
await expect(page).toHaveURL(/\/services/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Alerts Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('link', { name: 'All Alert Rules' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-13: alerts section shows firing alerts', async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('link', { name: 'All Alert Rules' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: /alert-rules/ }).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-14: All Alert Rules link navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('link', { name: 'All Alert Rules' }).click();
|
||||
await expect(page).toHaveURL(/\/alerts/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Dashboards Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('link', { name: 'All Dashboards' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-15: dashboards section shows recent dashboards', async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('link', { name: 'All Dashboards' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: /alert-rules/ }).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-16: All Dashboards link navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('link', { name: 'All Dashboards' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Saved Views Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('link', { name: 'All Views' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-17: saved views tabs switch between signal types', async ({ authedPage: page }) => {
|
||||
const logsTab = page.locator('button[value="logs"]');
|
||||
const tracesTab = page.locator('button[value="traces"]');
|
||||
const metricsTab = page.locator('button[value="metrics"]');
|
||||
|
||||
await expect(logsTab).toBeVisible();
|
||||
|
||||
await tracesTab.click();
|
||||
await expect(tracesTab).toBeVisible();
|
||||
|
||||
await metricsTab.click();
|
||||
await expect(metricsTab).toBeVisible();
|
||||
|
||||
await logsTab.click();
|
||||
await expect(logsTab).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-18: All Views link navigates to saved views', async ({ authedPage: page }) => {
|
||||
await page.locator('button[value="logs"]').click();
|
||||
await page.getByRole('link', { name: 'All Views' }).click();
|
||||
await expect(page).toHaveURL(/\/logs\/saved-views/);
|
||||
});
|
||||
});
|
||||
522
tests/e2e/legacy/roles/roles-listing.spec.ts
Normal file
522
tests/e2e/legacy/roles/roles-listing.spec.ts
Normal file
@@ -0,0 +1,522 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Roles Listing - Navigation and Access Control', () => {
|
||||
test(
|
||||
'Admin User Can Access Roles Page',
|
||||
async ({ authedPage: page }) => {
|
||||
|
||||
await page.goto('/settings/roles', {
|
||||
waitUntil: 'domcontentloaded',
|
||||
});
|
||||
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Roles',
|
||||
exact: true,
|
||||
}),
|
||||
).toBeVisible({ timeout: 30000 });
|
||||
|
||||
await expect(page).toHaveURL(/.*\/settings\/roles/);
|
||||
|
||||
await expect(
|
||||
page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
}),
|
||||
).toBeVisible({ timeout: 15000 });
|
||||
|
||||
const accessDenied = page.getByText('Access Denied');
|
||||
const permissionDenied = page.getByText('Permission denied');
|
||||
|
||||
const hasAccessDenied = await accessDenied.isVisible().catch(() => false);
|
||||
const hasPermissionDenied = await permissionDenied
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
expect(hasAccessDenied).toBe(false);
|
||||
expect(hasPermissionDenied).toBe(false);
|
||||
|
||||
await expect(page.getByRole('searchbox')).toBeVisible();
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Page Layout and UI Components', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
await Promise.race([
|
||||
page
|
||||
.getByRole('searchbox', { name: 'Search for roles...' })
|
||||
.waitFor({ state: 'visible', timeout: 10000 }),
|
||||
page
|
||||
.getByText(/error|failed/i)
|
||||
.waitFor({ state: 'visible', timeout: 10000 }),
|
||||
]).catch(() => {});
|
||||
});
|
||||
|
||||
test(
|
||||
'Verify Roles Listing Page Layout',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Roles',
|
||||
exact: true,
|
||||
}),
|
||||
).toBeVisible();
|
||||
|
||||
const searchInput = page.getByRole('searchbox');
|
||||
await expect(searchInput).toBeVisible();
|
||||
|
||||
await expect(
|
||||
page.getByText('Name', { exact: true }).first(),
|
||||
).toBeVisible();
|
||||
await expect(
|
||||
page.getByText('Description', { exact: true }).first(),
|
||||
).toBeVisible();
|
||||
await expect(page.getByText('Updated At', { exact: true })).toBeVisible();
|
||||
await expect(page.getByText('Created At', { exact: true })).toBeVisible();
|
||||
|
||||
await expect(page.locator('body')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Verify Table Structure',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('searchbox')).toBeVisible();
|
||||
|
||||
const roleNames = [
|
||||
'signoz-admin',
|
||||
'signoz-editor',
|
||||
'signoz-viewer',
|
||||
'signoz-anonymous',
|
||||
];
|
||||
const firstRole = page.getByText(roleNames[0]);
|
||||
await expect(firstRole).toBeVisible();
|
||||
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Managed roles' }),
|
||||
).toBeVisible();
|
||||
|
||||
await expect(page.getByText(/full administrative access/i)).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Roles Display and Data Verification', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
// Wait for page to load
|
||||
await expect(
|
||||
page.getByRole('searchbox', { name: 'Search for roles...' }),
|
||||
).toBeVisible();
|
||||
});
|
||||
|
||||
test(
|
||||
'Verify API Response Matches UI Display',
|
||||
async ({ authedPage: page }) => {
|
||||
let apiResponse: any = null;
|
||||
|
||||
page.on('response', async (response) => {
|
||||
if (
|
||||
response.url().includes('/api/v1/roles') &&
|
||||
response.status() === 200
|
||||
) {
|
||||
apiResponse = await response.json();
|
||||
}
|
||||
});
|
||||
|
||||
await page.reload();
|
||||
|
||||
await page
|
||||
.getByRole('searchbox', { name: 'Search for roles...' })
|
||||
.waitFor({ state: 'visible', timeout: 10000 });
|
||||
|
||||
await page.waitForTimeout(1000);
|
||||
|
||||
expect(apiResponse).not.toBeNull();
|
||||
expect(apiResponse.status).toBe('success');
|
||||
|
||||
const rolesFromApi = apiResponse.data;
|
||||
expect(rolesFromApi).toBeDefined();
|
||||
expect(rolesFromApi.length).toBe(5);
|
||||
|
||||
for (const role of rolesFromApi) {
|
||||
if (role.name) {
|
||||
await expect(page.getByText(role.name)).toBeVisible();
|
||||
}
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Verify Role Categorization (Managed vs Custom)',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('searchbox')).toBeVisible();
|
||||
|
||||
const managedRolesHeader = page.getByRole('heading', {
|
||||
name: 'Managed roles',
|
||||
});
|
||||
const customRolesHeader = page.getByRole('heading', {
|
||||
name: /Custom roles\s*\d+/,
|
||||
});
|
||||
|
||||
await expect(managedRolesHeader).toBeVisible();
|
||||
await expect(customRolesHeader).toBeVisible();
|
||||
|
||||
const headerText = await customRolesHeader.textContent();
|
||||
expect(headerText).toMatch(/Custom roles\s*\d+/);
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
await expect(page.getByText('signoz-editor')).toBeVisible();
|
||||
await expect(page.getByText('signoz-viewer')).toBeVisible();
|
||||
await expect(page.getByText('custom-role-ui')).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Search Functionality', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
// Wait for roles to load
|
||||
await page
|
||||
.getByRole('searchbox', { name: 'Search for roles...' })
|
||||
.waitFor({ state: 'visible', timeout: 10000 })
|
||||
.catch(() => {});
|
||||
});
|
||||
|
||||
test(
|
||||
'Search Roles by Name',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
await expect(page.getByText('signoz-editor')).toBeVisible();
|
||||
await expect(page.getByText('signoz-viewer')).toBeVisible();
|
||||
|
||||
const searchInput = page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
});
|
||||
await searchInput.fill('editor');
|
||||
|
||||
await page.waitForTimeout(300);
|
||||
|
||||
await expect(page.getByText('signoz-editor')).toBeVisible();
|
||||
|
||||
await searchInput.clear();
|
||||
await searchInput.fill(''); // Ensure it's empty
|
||||
await page.waitForTimeout(300);
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
await expect(page.getByText('signoz-editor')).toBeVisible();
|
||||
await expect(page.getByText('signoz-viewer')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search Roles by Description',
|
||||
async ({ authedPage: page }) => {
|
||||
const searchInput = page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
});
|
||||
await searchInput.fill('administrative');
|
||||
|
||||
await page.waitForTimeout(500);
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
await expect(page.getByText(/full administrative access/i)).toBeVisible();
|
||||
|
||||
await expect(page.getByText('signoz-viewer')).toBeHidden();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search with No Results',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible({
|
||||
timeout: 10000,
|
||||
});
|
||||
|
||||
const searchInput = page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
});
|
||||
await searchInput.fill('NonExistentRole123XYZ');
|
||||
|
||||
await page.waitForTimeout(300);
|
||||
|
||||
const adminStillVisible = await page
|
||||
.getByText('signoz-admin')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
const editorStillVisible = await page
|
||||
.getByText('signoz-editor')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
const viewerStillVisible = await page
|
||||
.getByText('signoz-viewer')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
// At least verify that not all roles are still visible (search had some effect)
|
||||
const allStillVisible =
|
||||
adminStillVisible && editorStillVisible && viewerStillVisible;
|
||||
expect(allStillVisible).toBe(false);
|
||||
|
||||
// 5. Clear search and verify roles reappear
|
||||
await searchInput.clear();
|
||||
await searchInput.fill('');
|
||||
await page.waitForTimeout(300);
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search Case Sensitivity',
|
||||
async ({ authedPage: page }) => {
|
||||
const searchInput = page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
});
|
||||
|
||||
await searchInput.fill('ADMIN');
|
||||
await page.waitForTimeout(300);
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
|
||||
await searchInput.clear();
|
||||
await searchInput.fill('admin');
|
||||
await page.waitForTimeout(300);
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
|
||||
await searchInput.clear();
|
||||
await searchInput.fill('AdMin');
|
||||
await page.waitForTimeout(300);
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
|
||||
await searchInput.clear();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Pagination Functionality', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/settings/roles');
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Roles', exact: true }),
|
||||
).toBeVisible({ timeout: 15000 });
|
||||
await expect(
|
||||
page.getByRole('searchbox', { name: 'Search for roles...' }),
|
||||
).toBeVisible({ timeout: 15000 });
|
||||
});
|
||||
|
||||
test(
|
||||
'Navigate Between Pages',
|
||||
async ({ authedPage: page }) => {
|
||||
const paginationList = page.getByRole('list').filter({ hasText: /\d/ });
|
||||
const hasPagination = await paginationList.isVisible().catch(() => false);
|
||||
|
||||
if (!hasPagination) {
|
||||
return;
|
||||
}
|
||||
|
||||
// 1. Verify pagination controls are visible
|
||||
await expect(paginationList).toBeVisible();
|
||||
|
||||
// 2. Note the first role displayed on page 1
|
||||
const page1HasAdmin = await page.getByText('signoz-admin').isVisible();
|
||||
|
||||
// 3. Click "Next" or page "2" in pagination
|
||||
const nextButton = page.getByRole('listitem').getByText('2');
|
||||
if (await nextButton.isVisible()) {
|
||||
await nextButton.click();
|
||||
} else {
|
||||
// Try clicking next arrow
|
||||
await page.getByRole('listitem').last().click();
|
||||
}
|
||||
|
||||
// 4. Wait for page to load
|
||||
await page.waitForTimeout(1000);
|
||||
|
||||
// 5. Observe roles on page 2
|
||||
const page2HasAdmin = await page.getByText('signoz-admin').isVisible();
|
||||
|
||||
// Verify different roles are shown (or same role is hidden if paging worked)
|
||||
expect(page2HasAdmin).not.toBe(page1HasAdmin);
|
||||
|
||||
// Verify URL updates with page parameter
|
||||
await expect(page).toHaveURL(/page=2/);
|
||||
|
||||
// 6. Click "Previous" or page "1"
|
||||
const prevButton = page.getByRole('listitem').getByText('1');
|
||||
if (await prevButton.isVisible()) {
|
||||
await prevButton.click();
|
||||
} else {
|
||||
// Try clicking previous arrow
|
||||
await page.getByRole('listitem').first().click();
|
||||
}
|
||||
|
||||
// 7. Wait and verify return to page 1
|
||||
await page.waitForTimeout(1000);
|
||||
await expect(page).toHaveURL(/page=1|\/roles(?!.*page)/);
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Pagination with Search Results',
|
||||
async ({ authedPage: page }) => {
|
||||
const paginationList = page.getByRole('list').filter({ hasText: /\d/ });
|
||||
const hasPagination = await paginationList.isVisible().catch(() => false);
|
||||
|
||||
if (!hasPagination) {
|
||||
return;
|
||||
}
|
||||
|
||||
const searchInput = page.getByRole('searchbox');
|
||||
await searchInput.fill('signoz');
|
||||
|
||||
await page.waitForTimeout(500);
|
||||
|
||||
const paginationAfterSearch = await paginationList
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
if (paginationAfterSearch) {
|
||||
const page2Button = page.getByRole('listitem').getByText('2');
|
||||
if (await page2Button.isVisible()) {
|
||||
await page2Button.click();
|
||||
await page.waitForTimeout(500);
|
||||
|
||||
const url = page.url();
|
||||
expect(url).toContain('page=2');
|
||||
}
|
||||
}
|
||||
|
||||
await searchInput.clear();
|
||||
await page.waitForTimeout(500);
|
||||
|
||||
await expect(paginationList).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Pagination State Persistence',
|
||||
async ({ authedPage: page }) => {
|
||||
const paginationList = page.getByRole('list').filter({ hasText: /\d/ });
|
||||
const hasPagination = await paginationList.isVisible().catch(() => false);
|
||||
|
||||
if (!hasPagination) {
|
||||
return;
|
||||
}
|
||||
|
||||
const page2Button = page.getByRole('listitem').getByText('2');
|
||||
if (await page2Button.isVisible()) {
|
||||
await page2Button.click();
|
||||
await page.waitForTimeout(500);
|
||||
|
||||
await expect(page).toHaveURL(/page=2/);
|
||||
|
||||
await page.reload();
|
||||
|
||||
await expect(page).toHaveURL(/page=2/);
|
||||
|
||||
await expect(
|
||||
page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
}),
|
||||
).toBeVisible();
|
||||
}
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Loading and Error States', () => {
|
||||
test(
|
||||
'Verify Loading State',
|
||||
async ({ authedPage: page }) => {
|
||||
await page.route('**/api/v1/roles', async (route) => {
|
||||
await new Promise((resolve) => setTimeout(resolve, 1000));
|
||||
route.continue();
|
||||
});
|
||||
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
const loadingIndicators = [
|
||||
page.locator('[class*="skeleton"]'),
|
||||
page.locator('[class*="loading"]'),
|
||||
page.locator('[class*="spinner"]'),
|
||||
page.getByRole('progressbar'),
|
||||
];
|
||||
|
||||
for (const indicator of loadingIndicators) {
|
||||
if (await indicator.isVisible().catch(() => false)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
await expect(
|
||||
page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
}),
|
||||
).toBeVisible({ timeout: 10000 });
|
||||
|
||||
await expect(page.getByRole('heading', { name: 'Roles' })).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Handle API Error State',
|
||||
async ({ authedPage: page }) => {
|
||||
await page.route('**/api/v1/roles', async (route) => {
|
||||
route.fulfill({
|
||||
status: 500,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify({
|
||||
status: 'error',
|
||||
error: 'Internal Server Error',
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
await page.waitForTimeout(2000);
|
||||
|
||||
const hasRoles = await page
|
||||
.getByText('signoz-admin')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
if (!hasRoles) {
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Roles',
|
||||
exact: true,
|
||||
}),
|
||||
).toBeVisible();
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Handle Network Failure',
|
||||
async ({ authedPage: page }) => {
|
||||
await page.route('**/api/v1/roles', async (route) => {
|
||||
route.abort('failed');
|
||||
});
|
||||
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
await page.waitForTimeout(2000);
|
||||
|
||||
const hasRoles = await page
|
||||
.getByText('signoz-admin')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
expect(hasRoles).toBe(false);
|
||||
|
||||
await expect(page.locator('body')).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
45
tests/e2e/package.json
Normal file
45
tests/e2e/package.json
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"name": "signoz-frontend-automation",
|
||||
"version": "1.0.0",
|
||||
"description": "E2E tests for SigNoz frontend with Playwright",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"test": "playwright test",
|
||||
"test:staging": "SIGNOZ_E2E_BASE_URL=https://app.us.staging.signoz.cloud playwright test",
|
||||
"test:ui": "playwright test --ui",
|
||||
"test:headed": "playwright test --headed",
|
||||
"test:debug": "playwright test --debug",
|
||||
"test:chromium": "playwright test --project=chromium",
|
||||
"test:firefox": "playwright test --project=firefox",
|
||||
"test:webkit": "playwright test --project=webkit",
|
||||
"report": "playwright show-report artifacts/html",
|
||||
"codegen": "playwright codegen",
|
||||
"install:browsers": "playwright install",
|
||||
"install:cli": "npm install -g @playwright/cli@latest && playwright-cli install --skills",
|
||||
"lint": "eslint . --ext .ts,.js",
|
||||
"lint:fix": "eslint . --ext .ts,.js --fix",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"keywords": [
|
||||
"playwright",
|
||||
"e2e",
|
||||
"testing",
|
||||
"signoz"
|
||||
],
|
||||
"author": "",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.57.0-alpha-2025-10-09",
|
||||
"@types/node": "^20.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.0.0",
|
||||
"@typescript-eslint/parser": "^6.0.0",
|
||||
"dotenv": "^16.0.0",
|
||||
"eslint": "^9.26.0",
|
||||
"eslint-plugin-playwright": "^0.16.0",
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0",
|
||||
"yarn": ">=1.22.0"
|
||||
}
|
||||
}
|
||||
11
tests/e2e/playwright-cli.json
Normal file
11
tests/e2e/playwright-cli.json
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"browser": {
|
||||
"browserName": "chromium",
|
||||
"launchOptions": { "headless": true }
|
||||
},
|
||||
"timeouts": {
|
||||
"action": 5000,
|
||||
"navigation": 30000
|
||||
},
|
||||
"outputDir": ".playwright-cli"
|
||||
}
|
||||
61
tests/e2e/playwright.config.ts
Normal file
61
tests/e2e/playwright.config.ts
Normal file
@@ -0,0 +1,61 @@
|
||||
import { defineConfig, devices } from '@playwright/test';
|
||||
import dotenv from 'dotenv';
|
||||
import path from 'path';
|
||||
|
||||
// .env holds user-provided defaults (staging creds).
|
||||
// .env.local is written by tests/e2e/bootstrap/setup.py when the pytest
|
||||
// lifecycle brings the backend up locally; override=true so local-backend
|
||||
// coordinates win over any stale .env values. Subprocess-injected env
|
||||
// (e.g. when pytest shells out to `yarn test`) still takes priority —
|
||||
// dotenv doesn't touch vars that are already set in process.env.
|
||||
dotenv.config({ path: path.resolve(__dirname, '.env') });
|
||||
dotenv.config({ path: path.resolve(__dirname, '.env.local'), override: true });
|
||||
|
||||
export default defineConfig({
|
||||
testDir: './tests',
|
||||
|
||||
// All Playwright output lands under artifacts/. One subdir per reporter
|
||||
// plus test-results/ for per-test artifacts (traces/screenshots/videos).
|
||||
// CI can archive the whole dir with `tar czf artifacts.tgz tests/e2e/artifacts`.
|
||||
outputDir: 'artifacts/test-results',
|
||||
|
||||
// Run tests in parallel
|
||||
fullyParallel: true,
|
||||
|
||||
// Fail the build on CI if you accidentally left test.only
|
||||
forbidOnly: !!process.env.CI,
|
||||
|
||||
// Retry on CI only
|
||||
retries: process.env.CI ? 2 : 0,
|
||||
|
||||
// Workers
|
||||
workers: process.env.CI ? 2 : undefined,
|
||||
|
||||
// Reporter
|
||||
reporter: [
|
||||
['html', { outputFolder: 'artifacts/html', open: 'never' }],
|
||||
['json', { outputFile: 'artifacts/json/results.json' }],
|
||||
['list'],
|
||||
],
|
||||
|
||||
// Shared settings
|
||||
use: {
|
||||
baseURL:
|
||||
process.env.SIGNOZ_E2E_BASE_URL || 'https://app.us.staging.signoz.cloud',
|
||||
trace: 'on-first-retry',
|
||||
screenshot: 'only-on-failure',
|
||||
video: 'retain-on-failure',
|
||||
colorScheme: 'dark',
|
||||
locale: 'en-US',
|
||||
viewport: { width: 1280, height: 720 },
|
||||
},
|
||||
|
||||
// Browser projects. No project-level auth — specs opt in via the
|
||||
// authedPage fixture in tests/e2e/fixtures/auth.ts, which logs a user
|
||||
// in on first use and caches the resulting storageState per worker.
|
||||
projects: [
|
||||
{ name: 'chromium', use: devices['Desktop Chrome'] },
|
||||
{ name: 'firefox', use: devices['Desktop Firefox'] },
|
||||
{ name: 'webkit', use: devices['Desktop Safari'] },
|
||||
],
|
||||
});
|
||||
23
tests/e2e/tsconfig.json
Normal file
23
tests/e2e/tsconfig.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"module": "commonjs",
|
||||
"moduleResolution": "bundler",
|
||||
"lib": ["ES2020"],
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"resolveJsonModule": true,
|
||||
"types": ["node", "@playwright/test"],
|
||||
"paths": {
|
||||
"@tests/*": ["./tests/*"],
|
||||
"@utils/*": ["./utils/*"],
|
||||
"@specs/*": ["./specs/*"]
|
||||
},
|
||||
"outDir": "./dist",
|
||||
"rootDir": "."
|
||||
},
|
||||
"include": ["tests/**/*.ts", "utils/**/*.ts", "playwright.config.ts"],
|
||||
"exclude": ["node_modules", "dist"]
|
||||
}
|
||||
1480
tests/e2e/yarn.lock
Normal file
1480
tests/e2e/yarn.lock
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,18 +1,118 @@
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import List
|
||||
from typing import Callable, List
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.fs import get_testdata_file_path
|
||||
from fixtures.logger import setup_logger
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.metrics import Metrics
|
||||
from fixtures.traces import Traces
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(name="create_alert_rule", scope="function")
|
||||
def create_alert_rule(
|
||||
signoz: types.SigNoz, get_token: Callable[[str, str], str]
|
||||
) -> Callable[[dict], str]:
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
rule_ids = []
|
||||
|
||||
def _create_alert_rule(rule_data: dict) -> str:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/rules"),
|
||||
json=rule_data,
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Failed to create rule, api returned {response.status_code} with response: {response.text}"
|
||||
rule_id = response.json()["data"]["id"]
|
||||
rule_ids.append(rule_id)
|
||||
return rule_id
|
||||
|
||||
def _delete_alert_rule(rule_id: str):
|
||||
logger.info("Deleting rule: %s", {"rule_id": rule_id})
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(f"/api/v1/rules/{rule_id}"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
if response.status_code != HTTPStatus.OK:
|
||||
raise Exception( # pylint: disable=broad-exception-raised
|
||||
f"Failed to delete rule, api returned {response.status_code} with response: {response.text}"
|
||||
)
|
||||
|
||||
yield _create_alert_rule
|
||||
# delete the rule on cleanup
|
||||
for rule_id in rule_ids:
|
||||
try:
|
||||
_delete_alert_rule(rule_id)
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
logger.error("Error deleting rule: %s", {"rule_id": rule_id, "error": e})
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_alert_data", scope="function")
|
||||
def insert_alert_data(
|
||||
insert_metrics: Callable[[List[Metrics]], None],
|
||||
insert_traces: Callable[[List[Traces]], None],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> Callable[[List[types.AlertData]], None]:
|
||||
|
||||
def _insert_alert_data(
|
||||
alert_data_items: List[types.AlertData],
|
||||
base_time: datetime = None,
|
||||
) -> None:
|
||||
|
||||
metrics: List[Metrics] = []
|
||||
traces: List[Traces] = []
|
||||
logs: List[Logs] = []
|
||||
|
||||
now = base_time or datetime.now(tz=timezone.utc).replace(
|
||||
second=0, microsecond=0
|
||||
)
|
||||
|
||||
for data_item in alert_data_items:
|
||||
if data_item.type == "metrics":
|
||||
_metrics = Metrics.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
metrics.extend(_metrics)
|
||||
elif data_item.type == "traces":
|
||||
_traces = Traces.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
traces.extend(_traces)
|
||||
elif data_item.type == "logs":
|
||||
_logs = Logs.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
logs.extend(_logs)
|
||||
|
||||
# Add data to ClickHouse if any data is present
|
||||
if len(metrics) > 0:
|
||||
insert_metrics(metrics)
|
||||
if len(traces) > 0:
|
||||
insert_traces(traces)
|
||||
if len(logs) > 0:
|
||||
insert_logs(logs)
|
||||
|
||||
yield _insert_alert_data
|
||||
|
||||
|
||||
def collect_webhook_firing_alerts(
|
||||
webhook_test_container: types.TestContainerDocker, notification_channel_name: str
|
||||
) -> List[types.FiringAlert]:
|
||||
@@ -1,5 +1,5 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, List, Tuple
|
||||
from typing import Callable, Dict, List, Tuple
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
@@ -11,7 +11,7 @@ from wiremock.resources.mappings import (
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -28,6 +28,8 @@ USER_VIEWER_NAME = "viewer"
|
||||
USER_VIEWER_EMAIL = "viewer@integration.test"
|
||||
USER_VIEWER_PASSWORD = "password123Z$"
|
||||
|
||||
USERS_BASE = "/api/v2/users"
|
||||
|
||||
|
||||
@pytest.fixture(name="create_user_admin", scope="package")
|
||||
def create_user_admin(
|
||||
@@ -55,7 +57,7 @@ def create_user_admin(
|
||||
def restore(cache: dict) -> types.Operation:
|
||||
return types.Operation(name=cache["name"])
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"create_user_admin",
|
||||
@@ -214,3 +216,108 @@ def add_license(
|
||||
)
|
||||
|
||||
assert response.json()["count"] == 1
|
||||
|
||||
|
||||
def create_active_user(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
email: str,
|
||||
role: str,
|
||||
password: str,
|
||||
name: str = "",
|
||||
) -> str:
|
||||
"""Invite a user and activate via resetPassword. Returns user ID."""
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/invite"),
|
||||
json={"email": email, "role": role, "name": name},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.CREATED, response.text
|
||||
invited_user = response.json()["data"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/resetPassword"),
|
||||
json={"password": password, "token": invited_user["token"]},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
return invited_user["id"]
|
||||
|
||||
|
||||
def find_user_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email from the user list. Raises AssertionError if not found."""
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(USERS_BASE),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
user = next((u for u in response.json()["data"] if u["email"] == email), None)
|
||||
assert user is not None, f"User with email '{email}' not found"
|
||||
return user
|
||||
|
||||
|
||||
def find_user_with_roles_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email and return UserWithRoles (user fields + userRoles).
|
||||
|
||||
Raises AssertionError if the user is not found.
|
||||
"""
|
||||
user = find_user_by_email(signoz, token, email)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user['id']}"),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
return response.json()["data"]
|
||||
|
||||
|
||||
def assert_user_has_role(data: Dict, role_name: str) -> None:
|
||||
"""Assert that a UserWithRoles response contains the expected managed role."""
|
||||
role_names = {ur["role"]["name"] for ur in data.get("userRoles", [])}
|
||||
assert role_name in role_names, f"Expected role '{role_name}' in {role_names}"
|
||||
|
||||
|
||||
def change_user_role(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
user_id: str,
|
||||
old_role: str,
|
||||
new_role: str,
|
||||
) -> None:
|
||||
"""Change a user's role (remove old, assign new).
|
||||
|
||||
Role names should be managed role names (e.g. signoz-editor).
|
||||
"""
|
||||
# Get current roles to find the old role's ID
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
roles = response.json()["data"]
|
||||
|
||||
old_role_entry = next((r for r in roles if r["name"] == old_role), None)
|
||||
assert old_role_entry is not None, f"User does not have role '{old_role}'"
|
||||
|
||||
# Remove old role
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"{USERS_BASE}/{user_id}/roles/{old_role_entry['id']}"
|
||||
),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
# Assign new role
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
json={"name": new_role},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
@@ -10,7 +10,7 @@ import pytest
|
||||
from testcontainers.clickhouse import ClickHouseContainer
|
||||
from testcontainers.core.container import Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -256,7 +256,7 @@ def clickhouse(
|
||||
env=env,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"clickhouse",
|
||||
@@ -5,6 +5,13 @@ from typing import Callable
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
from wiremock.client import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
@@ -153,3 +160,140 @@ def create_cloud_integration_account(
|
||||
logger.info("Cleaned up test account: %s", account_id)
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
logger.info("Post-test delete cleanup failed: %s", exc)
|
||||
|
||||
|
||||
def deprecated_simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/agent-check-in"
|
||||
|
||||
checkin_payload = {
|
||||
"account_id": account_id,
|
||||
"cloud_account_id": cloud_account_id,
|
||||
"data": {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def setup_create_account_mocks(
|
||||
signoz: types.SigNoz,
|
||||
make_http_mocks: Callable,
|
||||
) -> None:
|
||||
"""Set up Zeus and Gateway mocks required by the CreateAccount endpoint."""
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
make_http_mocks(
|
||||
signoz.gateway,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v1/workspaces/me/keys/search?name=aws-integration&page=1&per_page=10",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": [],
|
||||
"_pagination": {"page": 1, "per_page": 10, "total": 0},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.POST,
|
||||
url="/v1/workspaces/me/keys",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "aws-integration",
|
||||
"value": "test-ingestion-key-123456",
|
||||
},
|
||||
"error": "",
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
def simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
data: dict | None = None,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud_integrations/{cloud_provider}/accounts/check_in"
|
||||
|
||||
checkin_payload = {
|
||||
"cloudIntegrationId": account_id,
|
||||
"providerAccountId": cloud_account_id,
|
||||
"data": data or {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
79
tests/fixtures/dashboards.py
vendored
Normal file
79
tests/fixtures/dashboards.py
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
def create_dashboard(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
payload: Dict,
|
||||
*,
|
||||
timeout: int = 5,
|
||||
) -> str:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/dashboards"),
|
||||
json=payload,
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=timeout,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.CREATED, (
|
||||
f"create_dashboard failed: {response.status_code} {response.text}"
|
||||
)
|
||||
return response.json()["data"]["id"]
|
||||
|
||||
|
||||
def list_dashboards(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
*,
|
||||
timeout: int = 5,
|
||||
) -> List[Dict]:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/dashboards"),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=timeout,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, (
|
||||
f"list_dashboards failed: {response.status_code} {response.text}"
|
||||
)
|
||||
return response.json().get("data", []) or []
|
||||
|
||||
|
||||
def find_dashboard_by_title(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
title: str,
|
||||
) -> Optional[Dict]:
|
||||
for dashboard in list_dashboards(signoz, token):
|
||||
data = dashboard.get("data") or dashboard
|
||||
if data.get("title") == title:
|
||||
return dashboard
|
||||
return None
|
||||
|
||||
|
||||
def upsert_dashboard(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
payload: Dict,
|
||||
) -> str:
|
||||
"""
|
||||
Idempotent create. Looks up by title; if present, returns the existing
|
||||
dashboard id. Intended for warm-backend seed loops under `--reuse`.
|
||||
"""
|
||||
title = payload.get("title")
|
||||
if title:
|
||||
existing = find_dashboard_by_title(signoz, token, title)
|
||||
if existing is not None:
|
||||
dashboard_id = existing.get("id") or (existing.get("data") or {}).get("id")
|
||||
logger.info(
|
||||
"dashboard already present, skipping: %s",
|
||||
{"title": title, "id": dashboard_id},
|
||||
)
|
||||
return dashboard_id
|
||||
return create_dashboard(signoz, token, payload)
|
||||
@@ -1,3 +1,4 @@
|
||||
import os
|
||||
from typing import Any, Generator
|
||||
|
||||
import pytest
|
||||
@@ -13,3 +14,8 @@ def tmpfs(
|
||||
return tmp_path_factory.mktemp(basename)
|
||||
|
||||
yield _tmp
|
||||
|
||||
|
||||
def get_testdata_file_path(file: str) -> str:
|
||||
testdata_dir = os.path.join(os.path.dirname(__file__), "..", "testdata")
|
||||
return os.path.join(testdata_dir, file)
|
||||
@@ -12,7 +12,7 @@ from wiremock.client import (
|
||||
from wiremock.constants import Config
|
||||
from wiremock.testing.testcontainer import WireMockContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -63,7 +63,7 @@ def zeus(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"zeus",
|
||||
@@ -120,7 +120,7 @@ def gateway(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"gateway",
|
||||
@@ -11,7 +11,7 @@ from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.idp import IDP_ROOT_PASSWORD, IDP_ROOT_USERNAME
|
||||
from fixtures.keycloak import IDP_ROOT_PASSWORD, IDP_ROOT_USERNAME
|
||||
|
||||
|
||||
@pytest.fixture(name="create_saml_client", scope="function")
|
||||
@@ -4,7 +4,7 @@ import pytest
|
||||
from testcontainers.core.container import Network
|
||||
from testcontainers.keycloak import KeycloakContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -80,7 +80,7 @@ def idp(
|
||||
container=container,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"idp",
|
||||
@@ -11,7 +11,7 @@ from ksuid import KsuidMs
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.fingerprint import LogsOrTracesFingerprint
|
||||
from fixtures.utils import parse_timestamp
|
||||
from fixtures.time import parse_timestamp
|
||||
|
||||
|
||||
class LogsResource(ABC):
|
||||
@@ -391,112 +391,126 @@ class Logs(ABC):
|
||||
return logs
|
||||
|
||||
|
||||
def insert_logs_to_clickhouse(conn, logs: List[Logs]) -> None:
|
||||
"""
|
||||
Insert logs into ClickHouse tables following the same logic as the Go exporter.
|
||||
Handles insertion into:
|
||||
- distributed_logs_v2 (main logs table)
|
||||
- distributed_logs_v2_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_logs_attribute_keys (attribute keys)
|
||||
- distributed_logs_resource_keys (resource keys)
|
||||
|
||||
Pure function so the seeder container can reuse the exact insert path
|
||||
used by the pytest fixture. `conn` is a clickhouse-connect Client.
|
||||
"""
|
||||
resources: List[LogsResource] = []
|
||||
for log in logs:
|
||||
resources.extend(log.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
column_names=[
|
||||
"labels",
|
||||
"fingerprint",
|
||||
"seen_at_ts_bucket_start",
|
||||
],
|
||||
)
|
||||
|
||||
tag_attributes: List[LogsTagAttributes] = []
|
||||
for log in logs:
|
||||
tag_attributes.extend(log.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
attribute_keys.extend(log.attribute_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_attribute_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
resource_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
resource_keys.extend(log.resource_keys)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_resource_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2",
|
||||
data=[log.np_arr() for log in logs],
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"observed_timestamp",
|
||||
"id",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_flags",
|
||||
"severity_text",
|
||||
"severity_number",
|
||||
"body",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"scope_name",
|
||||
"scope_version",
|
||||
"scope_string",
|
||||
"resource",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
_LOGS_TABLES_TO_TRUNCATE = [
|
||||
"logs_v2",
|
||||
"logs_v2_resource",
|
||||
"tag_attributes_v2",
|
||||
"logs_attribute_keys",
|
||||
"logs_resource_keys",
|
||||
]
|
||||
|
||||
|
||||
def truncate_logs_tables(conn, cluster: str) -> None:
|
||||
"""Truncate all logs tables. Used by the pytest fixture teardown and by
|
||||
the seeder's DELETE /telemetry/logs endpoint."""
|
||||
for table in _LOGS_TABLES_TO_TRUNCATE:
|
||||
conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_logs", scope="function")
|
||||
def insert_logs(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
) -> Generator[Callable[[List[Logs]], None], Any, None]:
|
||||
def _insert_logs(logs: List[Logs]) -> None:
|
||||
"""
|
||||
Insert logs into ClickHouse tables following the same logic as the Go exporter.
|
||||
This function handles insertion into multiple tables:
|
||||
- distributed_logs_v2 (main logs table)
|
||||
- distributed_logs_v2_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_logs_attribute_keys (attribute keys)
|
||||
- distributed_logs_resource_keys (resource keys)
|
||||
"""
|
||||
resources: List[LogsResource] = []
|
||||
for log in logs:
|
||||
resources.extend(log.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
column_names=[
|
||||
"labels",
|
||||
"fingerprint",
|
||||
"seen_at_ts_bucket_start",
|
||||
],
|
||||
)
|
||||
|
||||
tag_attributes: List[LogsTagAttributes] = []
|
||||
for log in logs:
|
||||
tag_attributes.extend(log.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
attribute_keys.extend(log.attribute_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_attribute_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
resource_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
resource_keys.extend(log.resource_keys)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_resource_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2",
|
||||
data=[log.np_arr() for log in logs],
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"observed_timestamp",
|
||||
"id",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_flags",
|
||||
"severity_text",
|
||||
"severity_number",
|
||||
"body",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"scope_name",
|
||||
"scope_version",
|
||||
"scope_string",
|
||||
"resource",
|
||||
],
|
||||
)
|
||||
insert_logs_to_clickhouse(clickhouse.conn, logs)
|
||||
|
||||
yield _insert_logs
|
||||
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_v2_resource ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.tag_attributes_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_attribute_keys ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_resource_keys ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
truncate_logs_tables(
|
||||
clickhouse.conn,
|
||||
clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"],
|
||||
)
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import numpy as np
|
||||
import pytest
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.utils import parse_timestamp
|
||||
from fixtures.time import parse_timestamp
|
||||
|
||||
|
||||
class MetricsTimeSeries(ABC):
|
||||
@@ -417,151 +417,168 @@ class Metrics(ABC):
|
||||
return metrics
|
||||
|
||||
|
||||
def insert_metrics_to_clickhouse(conn, metrics: List[Metrics]) -> None:
|
||||
"""
|
||||
Insert metrics into ClickHouse tables.
|
||||
Handles insertion into:
|
||||
- distributed_time_series_v4 (time series metadata)
|
||||
- distributed_samples_v4 (actual sample values)
|
||||
- distributed_metadata (metric attribute metadata)
|
||||
|
||||
Pure function so the seeder container can reuse the exact insert path
|
||||
used by the pytest fixture. `conn` is a clickhouse-connect Client.
|
||||
"""
|
||||
time_series_map: dict[int, MetricsTimeSeries] = {}
|
||||
for metric in metrics:
|
||||
fp = int(metric.time_series.fingerprint)
|
||||
if fp not in time_series_map:
|
||||
time_series_map[fp] = metric.time_series
|
||||
|
||||
if len(time_series_map) > 0:
|
||||
conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_time_series_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"labels",
|
||||
"attrs",
|
||||
"scope_attrs",
|
||||
"resource_attrs",
|
||||
"__normalized",
|
||||
],
|
||||
data=[ts.to_row() for ts in time_series_map.values()],
|
||||
)
|
||||
|
||||
samples = [metric.sample for metric in metrics]
|
||||
if len(samples) > 0:
|
||||
conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_samples_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"value",
|
||||
"flags",
|
||||
],
|
||||
data=[sample.to_row() for sample in samples],
|
||||
)
|
||||
|
||||
# (metric_name, attr_type, attr_name, attr_value) -> MetricsMetadata
|
||||
metadata_map: dict[tuple, MetricsMetadata] = {}
|
||||
for metric in metrics:
|
||||
ts = metric.time_series
|
||||
for attr_name, attr_value in metric.labels.items():
|
||||
key = (ts.metric_name, "point", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="point",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.resource_attrs.items():
|
||||
key = (ts.metric_name, "resource", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="resource",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.scope_attrs.items():
|
||||
key = (ts.metric_name, "scope", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="scope",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
|
||||
if len(metadata_map) > 0:
|
||||
conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_metadata",
|
||||
column_names=[
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"attr_name",
|
||||
"attr_type",
|
||||
"attr_datatype",
|
||||
"attr_string_value",
|
||||
"first_reported_unix_milli",
|
||||
"last_reported_unix_milli",
|
||||
],
|
||||
data=[m.to_row() for m in metadata_map.values()],
|
||||
)
|
||||
|
||||
|
||||
_METRICS_TABLES_TO_TRUNCATE = [
|
||||
"time_series_v4",
|
||||
"samples_v4",
|
||||
"exp_hist",
|
||||
"metadata",
|
||||
]
|
||||
|
||||
|
||||
def truncate_metrics_tables(conn, cluster: str) -> None:
|
||||
"""Truncate all metrics tables. Used by the pytest fixture teardown and by
|
||||
the seeder's DELETE /telemetry/metrics endpoint."""
|
||||
for table in _METRICS_TABLES_TO_TRUNCATE:
|
||||
conn.query(
|
||||
f"TRUNCATE TABLE signoz_metrics.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_metrics", scope="function")
|
||||
def insert_metrics(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
) -> Generator[Callable[[List[Metrics]], None], Any, None]:
|
||||
def _insert_metrics(metrics: List[Metrics]) -> None:
|
||||
"""
|
||||
Insert metrics into ClickHouse tables.
|
||||
This function handles insertion into:
|
||||
- distributed_time_series_v4 (time series metadata)
|
||||
- distributed_samples_v4 (actual sample values)
|
||||
- distributed_metadata (metric attribute metadata)
|
||||
"""
|
||||
time_series_map: dict[int, MetricsTimeSeries] = {}
|
||||
for metric in metrics:
|
||||
fp = int(metric.time_series.fingerprint)
|
||||
if fp not in time_series_map:
|
||||
time_series_map[fp] = metric.time_series
|
||||
|
||||
if len(time_series_map) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_time_series_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"labels",
|
||||
"attrs",
|
||||
"scope_attrs",
|
||||
"resource_attrs",
|
||||
"__normalized",
|
||||
],
|
||||
data=[ts.to_row() for ts in time_series_map.values()],
|
||||
)
|
||||
|
||||
samples = [metric.sample for metric in metrics]
|
||||
if len(samples) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_samples_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"value",
|
||||
"flags",
|
||||
],
|
||||
data=[sample.to_row() for sample in samples],
|
||||
)
|
||||
|
||||
# (metric_name, attr_type, attr_name, attr_value) -> MetricsMetadata
|
||||
metadata_map: dict[tuple, MetricsMetadata] = {}
|
||||
for metric in metrics:
|
||||
ts = metric.time_series
|
||||
for attr_name, attr_value in metric.labels.items():
|
||||
key = (ts.metric_name, "point", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="point",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.resource_attrs.items():
|
||||
key = (ts.metric_name, "resource", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="resource",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.scope_attrs.items():
|
||||
key = (ts.metric_name, "scope", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="scope",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
|
||||
if len(metadata_map) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_metadata",
|
||||
column_names=[
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"attr_name",
|
||||
"attr_type",
|
||||
"attr_datatype",
|
||||
"attr_string_value",
|
||||
"first_reported_unix_milli",
|
||||
"last_reported_unix_milli",
|
||||
],
|
||||
data=[m.to_row() for m in metadata_map.values()],
|
||||
)
|
||||
insert_metrics_to_clickhouse(clickhouse.conn, metrics)
|
||||
|
||||
yield _insert_metrics
|
||||
|
||||
cluster = clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"]
|
||||
tables_to_truncate = [
|
||||
"time_series_v4",
|
||||
"samples_v4",
|
||||
"exp_hist",
|
||||
"metadata",
|
||||
]
|
||||
for table in tables_to_truncate:
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_metrics.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
truncate_metrics_tables(
|
||||
clickhouse.conn,
|
||||
clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"],
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="remove_metrics_ttl_and_storage_settings", scope="function")
|
||||
@@ -2,7 +2,7 @@ import docker
|
||||
import pytest
|
||||
from testcontainers.core.container import Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -67,7 +67,7 @@ def migrator(
|
||||
def restore(cache: dict) -> types.Operation:
|
||||
return types.Operation(name=cache["name"])
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"migrator",
|
||||
@@ -3,7 +3,7 @@ import docker.errors
|
||||
import pytest
|
||||
from testcontainers.core.network import Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -37,7 +37,7 @@ def network(
|
||||
nw = client.networks.get(network_id=existing.get("id"))
|
||||
return types.Network(id=nw.id, name=nw.name)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"network",
|
||||
@@ -8,7 +8,7 @@ import requests
|
||||
from testcontainers.core.container import Network
|
||||
from wiremock.testing.testcontainer import WireMockContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
@@ -60,7 +60,7 @@ def notification_channel(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"notification_channel",
|
||||
@@ -5,7 +5,7 @@ from sqlalchemy import create_engine, sql
|
||||
from testcontainers.core.container import Network
|
||||
from testcontainers.postgres import PostgresContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -97,7 +97,7 @@ def postgres(
|
||||
env=env,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"postgres",
|
||||
@@ -1,10 +1,13 @@
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
|
||||
|
||||
DEFAULT_STEP_INTERVAL = 60 # seconds
|
||||
DEFAULT_TOLERANCE = 1e-9
|
||||
@@ -583,3 +586,251 @@ def assert_scalar_column_order(
|
||||
f"{context}: Column {column_index} order mismatch. "
|
||||
f"Expected {expected_values}, got {actual_values}"
|
||||
)
|
||||
|
||||
|
||||
def format_timestamp(dt: datetime) -> str:
|
||||
"""
|
||||
Format a datetime object to match the API's timestamp format.
|
||||
The API returns timestamps with minimal fractional seconds precision.
|
||||
Example: 2026-02-03T20:54:56.5Z for 500000 microseconds
|
||||
"""
|
||||
base_str = dt.strftime("%Y-%m-%dT%H:%M:%S")
|
||||
if dt.microsecond:
|
||||
# Convert microseconds to fractional seconds and strip trailing zeros
|
||||
fractional = f"{dt.microsecond / 1000000:.6f}"[2:].rstrip("0")
|
||||
return f"{base_str}.{fractional}Z"
|
||||
return f"{base_str}Z"
|
||||
|
||||
|
||||
def assert_identical_query_response(
|
||||
response1: requests.Response, response2: requests.Response
|
||||
) -> None:
|
||||
"""
|
||||
Assert that two query responses are identical in status and data.
|
||||
"""
|
||||
assert response1.status_code == response2.status_code, "Status codes do not match"
|
||||
if response1.status_code == HTTPStatus.OK:
|
||||
assert (
|
||||
response1.json()["status"] == response2.json()["status"]
|
||||
), "Response statuses do not match"
|
||||
assert (
|
||||
response1.json()["data"]["data"]["results"]
|
||||
== response2.json()["data"]["data"]["results"]
|
||||
), "Response data do not match"
|
||||
|
||||
|
||||
def generate_logs_with_corrupt_metadata() -> List[Logs]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'severity_text', 'severity_number' and 'body' fields in metadata
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
body="POST /integration request received",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"severity_text": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
trace_id="1",
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
body="SELECT query executed",
|
||||
severity_text="DEBUG",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"severity_number": "corrupt_data",
|
||||
"id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_id": "2",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=2),
|
||||
body="HTTP PATCH failed with 404",
|
||||
severity_text="WARN",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"body": "corrupt_data",
|
||||
"trace_id": "3",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
body="{'trace_id': '4'}",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"body": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def generate_traces_with_corrupt_metadata() -> List[Traces]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'trace_id' and 'duration_nano' fields in metadata
|
||||
"""
|
||||
http_service_trace_id = TraceIdGenerator.trace_id()
|
||||
http_service_span_id = TraceIdGenerator.span_id()
|
||||
http_service_db_span_id = TraceIdGenerator.span_id()
|
||||
http_service_patch_span_id = TraceIdGenerator.span_id()
|
||||
topic_service_trace_id = TraceIdGenerator.trace_id()
|
||||
topic_service_span_id = TraceIdGenerator.span_id()
|
||||
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
duration=timedelta(seconds=3),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_span_id,
|
||||
parent_span_id="",
|
||||
name="POST /integration",
|
||||
kind=TracesKind.SPAN_KIND_SERVER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"trace_id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3.5),
|
||||
duration=timedelta(seconds=5),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_db_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="SELECT",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_d": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
duration=timedelta(seconds=1),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_patch_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="HTTP PATCH",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"duration_nano": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
duration=timedelta(seconds=4),
|
||||
trace_id=topic_service_trace_id,
|
||||
span_id=topic_service_span_id,
|
||||
parent_span_id="",
|
||||
name="topic publish",
|
||||
kind=TracesKind.SPAN_KIND_PRODUCER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"duration_nano": "corrupt_data",
|
||||
"id": 1,
|
||||
},
|
||||
),
|
||||
]
|
||||
116
tests/fixtures/seeder.py
vendored
Normal file
116
tests/fixtures/seeder.py
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
import time
|
||||
from http import HTTPStatus
|
||||
from pathlib import Path
|
||||
|
||||
import docker
|
||||
import docker.errors
|
||||
import pytest
|
||||
import requests
|
||||
from testcontainers.core.container import DockerContainer, Network
|
||||
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
# Build context is tests/ so `fixtures/` is importable inside the container
|
||||
# under /app/fixtures. This file sits at tests/fixtures/seeder.py, hence
|
||||
# parents[1] = tests/.
|
||||
_TESTS_ROOT = Path(__file__).resolve().parents[1]
|
||||
|
||||
|
||||
@pytest.fixture(name="seeder", scope="package")
|
||||
def seeder(
|
||||
network: Network,
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
request: pytest.FixtureRequest,
|
||||
pytestconfig: pytest.Config,
|
||||
) -> types.TestContainerDocker:
|
||||
"""
|
||||
HTTP seeder fixture — a Python container exposing POST/DELETE endpoints
|
||||
that wrap the direct-ClickHouse-insert helpers (currently just traces;
|
||||
logs + metrics to follow). Frontend tests call these endpoints to seed
|
||||
telemetry with fine-grained per-test control.
|
||||
"""
|
||||
|
||||
def create() -> types.TestContainerDocker:
|
||||
# docker-py wants `dockerfile` RELATIVE to `path`. The fixture file
|
||||
# lives at tests/fixtures/seeder.py so the build context root is
|
||||
# tests/ (one parent up), and the Dockerfile path inside that
|
||||
# context is Dockerfile.seeder.
|
||||
docker_client = docker.from_env()
|
||||
docker_client.images.build(
|
||||
path=str(_TESTS_ROOT),
|
||||
dockerfile="Dockerfile.seeder",
|
||||
tag="signoz-tests-seeder:latest",
|
||||
rm=True,
|
||||
)
|
||||
|
||||
container = DockerContainer("signoz-tests-seeder:latest")
|
||||
container.with_env(
|
||||
"CH_HOST", clickhouse.container.container_configs["8123"].address
|
||||
)
|
||||
container.with_env(
|
||||
"CH_PORT", str(clickhouse.container.container_configs["8123"].port)
|
||||
)
|
||||
container.with_env(
|
||||
"CH_USER", clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_USERNAME"]
|
||||
)
|
||||
container.with_env(
|
||||
"CH_PASSWORD", clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_PASSWORD"]
|
||||
)
|
||||
container.with_env(
|
||||
"CH_CLUSTER", clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"]
|
||||
)
|
||||
container.with_exposed_ports(8080)
|
||||
container.with_network(network=network)
|
||||
container.start()
|
||||
|
||||
host = container.get_container_host_ip()
|
||||
host_port = container.get_exposed_port(8080)
|
||||
|
||||
for attempt in range(20):
|
||||
try:
|
||||
response = requests.get(f"http://{host}:{host_port}/healthz", timeout=2)
|
||||
if response.status_code == HTTPStatus.OK:
|
||||
break
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
logger.info("seeder attempt %d: %s", attempt + 1, e)
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise TimeoutError("seeder container did not become ready")
|
||||
|
||||
return types.TestContainerDocker(
|
||||
id=container.get_wrapped_container().id,
|
||||
host_configs={
|
||||
"8080": types.TestContainerUrlConfig("http", host, host_port),
|
||||
},
|
||||
container_configs={
|
||||
"8080": types.TestContainerUrlConfig(
|
||||
"http", container.get_wrapped_container().name, 8080
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
def delete(container: types.TestContainerDocker) -> None:
|
||||
client = docker.from_env()
|
||||
try:
|
||||
client.containers.get(container_id=container.id).stop()
|
||||
client.containers.get(container_id=container.id).remove(v=True)
|
||||
except docker.errors.NotFound:
|
||||
logger.info("Seeder container %s already gone", container.id)
|
||||
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"seeder",
|
||||
empty=lambda: types.TestContainerDocker(
|
||||
id="", host_configs={}, container_configs={}
|
||||
),
|
||||
create=create,
|
||||
delete=delete,
|
||||
restore=restore,
|
||||
)
|
||||
@@ -2,6 +2,7 @@ import platform
|
||||
import time
|
||||
from http import HTTPStatus
|
||||
from os import path
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import docker
|
||||
@@ -11,11 +12,16 @@ import requests
|
||||
from testcontainers.core.container import DockerContainer, Network
|
||||
from testcontainers.core.image import DockerImage
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
# Absolute path to the signoz repo root. Anchored to this file so the build
|
||||
# context resolves correctly regardless of pytest's cwd (tests/ vs
|
||||
# tests/integration/). fixtures/signoz.py -> fixtures/ -> tests/ -> repo root.
|
||||
_REPO_ROOT = Path(__file__).resolve().parents[2]
|
||||
|
||||
|
||||
def create_signoz(
|
||||
network: Network,
|
||||
@@ -50,7 +56,7 @@ def create_signoz(
|
||||
dockerfile_path = "cmd/enterprise/Dockerfile.with-web.integration"
|
||||
|
||||
self = DockerImage(
|
||||
path="../../",
|
||||
path=str(_REPO_ROOT),
|
||||
dockerfile_path=dockerfile_path,
|
||||
tag="signoz:integration",
|
||||
buildargs={
|
||||
@@ -181,7 +187,7 @@ def create_signoz(
|
||||
gateway=gateway,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
cache_key,
|
||||
@@ -4,7 +4,7 @@ from typing import Any, Generator
|
||||
import pytest
|
||||
from sqlalchemy import create_engine, sql
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
|
||||
ConnectionTuple = namedtuple("ConnectionTuple", "connection config")
|
||||
|
||||
@@ -64,7 +64,7 @@ def sqlite(
|
||||
env=cache["env"],
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"sqlite",
|
||||
@@ -1,5 +1,4 @@
|
||||
import datetime
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
import isodate
|
||||
@@ -26,8 +25,3 @@ def parse_duration(duration: Any) -> datetime.timedelta:
|
||||
if isinstance(duration, datetime.timedelta):
|
||||
return duration
|
||||
return datetime.timedelta(seconds=duration)
|
||||
|
||||
|
||||
def get_testdata_file_path(file: str) -> str:
|
||||
testdata_dir = os.path.join(os.path.dirname(__file__), "..", "testdata")
|
||||
return os.path.join(testdata_dir, file)
|
||||
@@ -13,7 +13,7 @@ import pytest
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.fingerprint import LogsOrTracesFingerprint
|
||||
from fixtures.utils import parse_duration, parse_timestamp
|
||||
from fixtures.time import parse_duration, parse_timestamp
|
||||
|
||||
|
||||
class TracesKind(Enum):
|
||||
@@ -689,131 +689,144 @@ class Traces(ABC):
|
||||
return traces
|
||||
|
||||
|
||||
def insert_traces_to_clickhouse(conn, traces: List[Traces]) -> None:
|
||||
"""
|
||||
Insert traces into ClickHouse tables following the same logic as the Go exporter.
|
||||
Handles insertion into:
|
||||
- distributed_signoz_index_v3 (main traces table)
|
||||
- distributed_traces_v3_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_span_attributes_keys (attribute keys)
|
||||
- distributed_signoz_error_index_v2 (error events)
|
||||
|
||||
Pure function so the seeder container (tests/seeder/) can reuse the
|
||||
exact insert path used by the pytest fixtures. `conn` is a
|
||||
clickhouse-connect Client.
|
||||
"""
|
||||
resources: List[TracesResource] = []
|
||||
for trace in traces:
|
||||
resources.extend(trace.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_traces_v3_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
)
|
||||
|
||||
tag_attributes: List[TracesTagAttributes] = []
|
||||
for trace in traces:
|
||||
tag_attributes.extend(trace.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
resource_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
for trace in traces:
|
||||
attribute_keys.extend(trace.attribute_keys)
|
||||
resource_keys.extend(trace.resource_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_index_v3",
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_state",
|
||||
"parent_span_id",
|
||||
"flags",
|
||||
"name",
|
||||
"kind",
|
||||
"kind_string",
|
||||
"duration_nano",
|
||||
"status_code",
|
||||
"status_message",
|
||||
"status_code_string",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"events",
|
||||
"links",
|
||||
"response_status_code",
|
||||
"external_http_url",
|
||||
"http_url",
|
||||
"external_http_method",
|
||||
"http_method",
|
||||
"http_host",
|
||||
"db_name",
|
||||
"db_operation",
|
||||
"has_error",
|
||||
"is_remote",
|
||||
"resource",
|
||||
],
|
||||
data=[trace.np_arr() for trace in traces],
|
||||
)
|
||||
|
||||
error_events: List[TracesErrorEvent] = []
|
||||
for trace in traces:
|
||||
error_events.extend(trace.error_events)
|
||||
|
||||
if len(error_events) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_error_index_v2",
|
||||
data=[error_event.np_arr() for error_event in error_events],
|
||||
)
|
||||
|
||||
|
||||
_TRACES_TABLES_TO_TRUNCATE = [
|
||||
"signoz_index_v3",
|
||||
"traces_v3_resource",
|
||||
"tag_attributes_v2",
|
||||
"span_attributes_keys",
|
||||
"signoz_error_index_v2",
|
||||
]
|
||||
|
||||
|
||||
def truncate_traces_tables(conn, cluster: str) -> None:
|
||||
"""Truncate all traces tables. Used by the pytest fixture teardown and by
|
||||
the seeder's DELETE /telemetry/traces endpoint."""
|
||||
for table in _TRACES_TABLES_TO_TRUNCATE:
|
||||
conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_traces", scope="function")
|
||||
def insert_traces(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
) -> Generator[Callable[[List[Traces]], None], Any, None]:
|
||||
def _insert_traces(traces: List[Traces]) -> None:
|
||||
"""
|
||||
Insert traces into ClickHouse tables following the same logic as the Go exporter.
|
||||
This function handles insertion into multiple tables:
|
||||
- distributed_signoz_index_v3 (main traces table)
|
||||
- distributed_traces_v3_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_span_attributes_keys (attribute keys)
|
||||
- distributed_signoz_error_index_v2 (error events)
|
||||
"""
|
||||
resources: List[TracesResource] = []
|
||||
for trace in traces:
|
||||
resources.extend(trace.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_traces_v3_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
)
|
||||
|
||||
tag_attributes: List[TracesTagAttributes] = []
|
||||
for trace in traces:
|
||||
tag_attributes.extend(trace.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
resource_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
for trace in traces:
|
||||
attribute_keys.extend(trace.attribute_keys)
|
||||
resource_keys.extend(trace.resource_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
# Insert main traces
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_index_v3",
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_state",
|
||||
"parent_span_id",
|
||||
"flags",
|
||||
"name",
|
||||
"kind",
|
||||
"kind_string",
|
||||
"duration_nano",
|
||||
"status_code",
|
||||
"status_message",
|
||||
"status_code_string",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"events",
|
||||
"links",
|
||||
"response_status_code",
|
||||
"external_http_url",
|
||||
"http_url",
|
||||
"external_http_method",
|
||||
"http_method",
|
||||
"http_host",
|
||||
"db_name",
|
||||
"db_operation",
|
||||
"has_error",
|
||||
"is_remote",
|
||||
"resource",
|
||||
],
|
||||
data=[trace.np_arr() for trace in traces],
|
||||
)
|
||||
|
||||
# Insert error events
|
||||
error_events: List[TracesErrorEvent] = []
|
||||
for trace in traces:
|
||||
error_events.extend(trace.error_events)
|
||||
|
||||
if len(error_events) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_error_index_v2",
|
||||
data=[error_event.np_arr() for error_event in error_events],
|
||||
)
|
||||
insert_traces_to_clickhouse(clickhouse.conn, traces)
|
||||
|
||||
yield _insert_traces
|
||||
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.signoz_index_v3 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.traces_v3_resource ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.tag_attributes_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.span_attributes_keys ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.signoz_error_index_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
truncate_traces_tables(
|
||||
clickhouse.conn,
|
||||
clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"],
|
||||
)
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import docker.errors
|
||||
import pytest
|
||||
from testcontainers.core.container import DockerContainer, Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -58,7 +58,7 @@ def zookeeper(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"zookeeper",
|
||||
@@ -1,110 +0,0 @@
|
||||
from datetime import datetime, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, List
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.logger import setup_logger
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.metrics import Metrics
|
||||
from fixtures.traces import Traces
|
||||
from fixtures.utils import get_testdata_file_path
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(name="create_alert_rule", scope="function")
|
||||
def create_alert_rule(
|
||||
signoz: types.SigNoz, get_token: Callable[[str, str], str]
|
||||
) -> Callable[[dict], str]:
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
rule_ids = []
|
||||
|
||||
def _create_alert_rule(rule_data: dict) -> str:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/rules"),
|
||||
json=rule_data,
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Failed to create rule, api returned {response.status_code} with response: {response.text}"
|
||||
rule_id = response.json()["data"]["id"]
|
||||
rule_ids.append(rule_id)
|
||||
return rule_id
|
||||
|
||||
def _delete_alert_rule(rule_id: str):
|
||||
logger.info("Deleting rule: %s", {"rule_id": rule_id})
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(f"/api/v1/rules/{rule_id}"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
if response.status_code != HTTPStatus.OK:
|
||||
raise Exception( # pylint: disable=broad-exception-raised
|
||||
f"Failed to delete rule, api returned {response.status_code} with response: {response.text}"
|
||||
)
|
||||
|
||||
yield _create_alert_rule
|
||||
# delete the rule on cleanup
|
||||
for rule_id in rule_ids:
|
||||
try:
|
||||
_delete_alert_rule(rule_id)
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
logger.error("Error deleting rule: %s", {"rule_id": rule_id, "error": e})
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_alert_data", scope="function")
|
||||
def insert_alert_data(
|
||||
insert_metrics: Callable[[List[Metrics]], None],
|
||||
insert_traces: Callable[[List[Traces]], None],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> Callable[[List[types.AlertData]], None]:
|
||||
|
||||
def _insert_alert_data(
|
||||
alert_data_items: List[types.AlertData],
|
||||
base_time: datetime = None,
|
||||
) -> None:
|
||||
|
||||
metrics: List[Metrics] = []
|
||||
traces: List[Traces] = []
|
||||
logs: List[Logs] = []
|
||||
|
||||
now = base_time or datetime.now(tz=timezone.utc).replace(
|
||||
second=0, microsecond=0
|
||||
)
|
||||
|
||||
for data_item in alert_data_items:
|
||||
if data_item.type == "metrics":
|
||||
_metrics = Metrics.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
metrics.extend(_metrics)
|
||||
elif data_item.type == "traces":
|
||||
_traces = Traces.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
traces.extend(_traces)
|
||||
elif data_item.type == "logs":
|
||||
_logs = Logs.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
logs.extend(_logs)
|
||||
|
||||
# Add data to ClickHouse if any data is present
|
||||
if len(metrics) > 0:
|
||||
insert_metrics(metrics)
|
||||
if len(traces) > 0:
|
||||
insert_traces(traces)
|
||||
if len(logs) > 0:
|
||||
insert_logs(logs)
|
||||
|
||||
yield _insert_alert_data
|
||||
@@ -1,115 +0,0 @@
|
||||
"""Reusable helpers for user API tests."""
|
||||
|
||||
from http import HTTPStatus
|
||||
from typing import Dict
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
|
||||
USERS_BASE = "/api/v2/users"
|
||||
|
||||
|
||||
def create_active_user(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
email: str,
|
||||
role: str,
|
||||
password: str,
|
||||
name: str = "",
|
||||
) -> str:
|
||||
"""Invite a user and activate via resetPassword. Returns user ID."""
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/invite"),
|
||||
json={"email": email, "role": role, "name": name},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.CREATED, response.text
|
||||
invited_user = response.json()["data"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/resetPassword"),
|
||||
json={"password": password, "token": invited_user["token"]},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
return invited_user["id"]
|
||||
|
||||
|
||||
def find_user_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email from the user list. Raises AssertionError if not found."""
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(USERS_BASE),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
user = next((u for u in response.json()["data"] if u["email"] == email), None)
|
||||
assert user is not None, f"User with email '{email}' not found"
|
||||
return user
|
||||
|
||||
|
||||
def find_user_with_roles_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email and return UserWithRoles (user fields + userRoles).
|
||||
|
||||
Raises AssertionError if the user is not found.
|
||||
"""
|
||||
user = find_user_by_email(signoz, token, email)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user['id']}"),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
return response.json()["data"]
|
||||
|
||||
|
||||
def assert_user_has_role(data: Dict, role_name: str) -> None:
|
||||
"""Assert that a UserWithRoles response contains the expected managed role."""
|
||||
role_names = {ur["role"]["name"] for ur in data.get("userRoles", [])}
|
||||
assert role_name in role_names, f"Expected role '{role_name}' in {role_names}"
|
||||
|
||||
|
||||
def change_user_role(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
user_id: str,
|
||||
old_role: str,
|
||||
new_role: str,
|
||||
) -> None:
|
||||
"""Change a user's role (remove old, assign new).
|
||||
|
||||
Role names should be managed role names (e.g. signoz-editor).
|
||||
"""
|
||||
# Get current roles to find the old role's ID
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
roles = response.json()["data"]
|
||||
|
||||
old_role_entry = next((r for r in roles if r["name"] == old_role), None)
|
||||
assert old_role_entry is not None, f"User does not have role '{old_role}'"
|
||||
|
||||
# Remove old role
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"{USERS_BASE}/{user_id}/roles/{old_role_entry['id']}"
|
||||
),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
# Assign new role
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
json={"name": new_role},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
@@ -1,154 +0,0 @@
|
||||
"""Fixtures for cloud integration tests."""
|
||||
|
||||
from typing import Callable
|
||||
|
||||
import requests
|
||||
from wiremock.client import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
def deprecated_simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/agent-check-in"
|
||||
|
||||
checkin_payload = {
|
||||
"account_id": account_id,
|
||||
"cloud_account_id": cloud_account_id,
|
||||
"data": {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def setup_create_account_mocks(
|
||||
signoz: types.SigNoz,
|
||||
make_http_mocks: Callable,
|
||||
) -> None:
|
||||
"""Set up Zeus and Gateway mocks required by the CreateAccount endpoint."""
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
make_http_mocks(
|
||||
signoz.gateway,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v1/workspaces/me/keys/search?name=aws-integration&page=1&per_page=10",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": [],
|
||||
"_pagination": {"page": 1, "per_page": 10, "total": 0},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.POST,
|
||||
url="/v1/workspaces/me/keys",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "aws-integration",
|
||||
"value": "test-ingestion-key-123456",
|
||||
},
|
||||
"error": "",
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
def simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
data: dict | None = None,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud_integrations/{cloud_provider}/accounts/check_in"
|
||||
|
||||
checkin_payload = {
|
||||
"cloudIntegrationId": account_id,
|
||||
"providerAccountId": cloud_account_id,
|
||||
"data": data or {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
@@ -1,256 +0,0 @@
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import List
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
|
||||
|
||||
|
||||
def format_timestamp(dt: datetime) -> str:
|
||||
"""
|
||||
Format a datetime object to match the API's timestamp format.
|
||||
The API returns timestamps with minimal fractional seconds precision.
|
||||
Example: 2026-02-03T20:54:56.5Z for 500000 microseconds
|
||||
"""
|
||||
base_str = dt.strftime("%Y-%m-%dT%H:%M:%S")
|
||||
if dt.microsecond:
|
||||
# Convert microseconds to fractional seconds and strip trailing zeros
|
||||
fractional = f"{dt.microsecond / 1000000:.6f}"[2:].rstrip("0")
|
||||
return f"{base_str}.{fractional}Z"
|
||||
return f"{base_str}Z"
|
||||
|
||||
|
||||
def assert_identical_query_response(
|
||||
response1: requests.Response, response2: requests.Response
|
||||
) -> None:
|
||||
"""
|
||||
Assert that two query responses are identical in status and data.
|
||||
"""
|
||||
assert response1.status_code == response2.status_code, "Status codes do not match"
|
||||
if response1.status_code == HTTPStatus.OK:
|
||||
assert (
|
||||
response1.json()["status"] == response2.json()["status"]
|
||||
), "Response statuses do not match"
|
||||
assert (
|
||||
response1.json()["data"]["data"]["results"]
|
||||
== response2.json()["data"]["data"]["results"]
|
||||
), "Response data do not match"
|
||||
|
||||
|
||||
def generate_logs_with_corrupt_metadata() -> List[Logs]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'severity_text', 'severity_number' and 'body' fields in metadata
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
body="POST /integration request received",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"severity_text": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
trace_id="1",
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
body="SELECT query executed",
|
||||
severity_text="DEBUG",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"severity_number": "corrupt_data",
|
||||
"id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_id": "2",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=2),
|
||||
body="HTTP PATCH failed with 404",
|
||||
severity_text="WARN",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"body": "corrupt_data",
|
||||
"trace_id": "3",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
body="{'trace_id': '4'}",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"body": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def generate_traces_with_corrupt_metadata() -> List[Traces]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'trace_id' and 'duration_nano' fields in metadata
|
||||
"""
|
||||
http_service_trace_id = TraceIdGenerator.trace_id()
|
||||
http_service_span_id = TraceIdGenerator.span_id()
|
||||
http_service_db_span_id = TraceIdGenerator.span_id()
|
||||
http_service_patch_span_id = TraceIdGenerator.span_id()
|
||||
topic_service_trace_id = TraceIdGenerator.trace_id()
|
||||
topic_service_span_id = TraceIdGenerator.span_id()
|
||||
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
duration=timedelta(seconds=3),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_span_id,
|
||||
parent_span_id="",
|
||||
name="POST /integration",
|
||||
kind=TracesKind.SPAN_KIND_SERVER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"trace_id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3.5),
|
||||
duration=timedelta(seconds=5),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_db_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="SELECT",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_d": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
duration=timedelta(seconds=1),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_patch_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="HTTP PATCH",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"duration_nano": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
duration=timedelta(seconds=4),
|
||||
trace_id=topic_service_trace_id,
|
||||
span_id=topic_service_span_id,
|
||||
parent_span_id="",
|
||||
name="topic publish",
|
||||
kind=TracesKind.SPAN_KIND_PRODUCER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"duration_nano": "corrupt_data",
|
||||
"id": 1,
|
||||
},
|
||||
),
|
||||
]
|
||||
@@ -7,12 +7,12 @@ import pytest
|
||||
from wiremock.client import HttpMethods, Mapping, MappingRequest, MappingResponse
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.alertutils import (
|
||||
from fixtures.alerts import (
|
||||
update_rule_channel_name,
|
||||
verify_webhook_alert_expectation,
|
||||
)
|
||||
from fixtures.fs import get_testdata_file_path
|
||||
from fixtures.logger import setup_logger
|
||||
from fixtures.utils import get_testdata_file_path
|
||||
|
||||
# Alert test cases use a 30-second wait time to verify expected alert firing.
|
||||
# Alert data is set up to trigger on the first rule manager evaluation.
|
||||
@@ -11,12 +11,10 @@ from fixtures.auth import (
|
||||
USER_ADMIN_EMAIL,
|
||||
USER_ADMIN_PASSWORD,
|
||||
add_license,
|
||||
)
|
||||
from fixtures.authutils import (
|
||||
assert_user_has_role,
|
||||
find_user_with_roles_by_email,
|
||||
)
|
||||
from fixtures.idputils import (
|
||||
from fixtures.idp import (
|
||||
get_saml_domain,
|
||||
perform_saml_login,
|
||||
)
|
||||
@@ -10,12 +10,10 @@ from fixtures.auth import (
|
||||
USER_ADMIN_EMAIL,
|
||||
USER_ADMIN_PASSWORD,
|
||||
add_license,
|
||||
)
|
||||
from fixtures.authutils import (
|
||||
assert_user_has_role,
|
||||
find_user_with_roles_by_email,
|
||||
)
|
||||
from fixtures.idputils import (
|
||||
from fixtures.idp import (
|
||||
get_oidc_domain,
|
||||
perform_oidc_login,
|
||||
)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.cloudintegrationsutils import deprecated_simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import deprecated_simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.cloudintegrationsutils import deprecated_simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import deprecated_simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.cloudintegrationsutils import deprecated_simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import deprecated_simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -11,7 +11,7 @@ from wiremock.client import (
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import setup_create_account_mocks
|
||||
from fixtures.cloudintegrations import setup_create_account_mocks
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -4,7 +4,7 @@ from typing import Callable
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -11,7 +11,7 @@ from wiremock.client import (
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.gatewayutils import (
|
||||
from fixtures.gateway import (
|
||||
TEST_KEY_ID,
|
||||
common_gateway_headers,
|
||||
get_gateway_requests,
|
||||
@@ -11,7 +11,7 @@ from wiremock.client import (
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import add_license
|
||||
from fixtures.gatewayutils import (
|
||||
from fixtures.gateway import (
|
||||
TEST_KEY_ID,
|
||||
TEST_LIMIT_ID,
|
||||
common_gateway_headers,
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user