mirror of
https://github.com/SigNoz/signoz.git
synced 2026-04-22 11:50:29 +01:00
Compare commits
54 Commits
platform-p
...
tests/unif
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c832d81488 | ||
|
|
e36cf45431 | ||
|
|
97aceea50c | ||
|
|
7f0ff08066 | ||
|
|
6ab90a782b | ||
|
|
3e9a618721 | ||
|
|
8af041a345 | ||
|
|
52cf194111 | ||
|
|
f80b899dbe | ||
|
|
d2898efce8 | ||
|
|
c999a2f44f | ||
|
|
c3a889d7e1 | ||
|
|
c35ff1d4d5 | ||
|
|
40d429bc89 | ||
|
|
8a58db9bda | ||
|
|
fe735b66b7 | ||
|
|
887d5c47b2 | ||
|
|
e0778973ac | ||
|
|
9f2269fee6 | ||
|
|
6481b660ee | ||
|
|
de0396d5bd | ||
|
|
49ef953b15 | ||
|
|
24513f305d | ||
|
|
34d36ecd2c | ||
|
|
0e13757719 | ||
|
|
8633b3d358 | ||
|
|
c4bde774e1 | ||
|
|
acff718113 | ||
|
|
8d4122df22 | ||
|
|
138b0cd606 | ||
|
|
c17e54ad01 | ||
|
|
51581160eb | ||
|
|
7959e9eadd | ||
|
|
a6faab083f | ||
|
|
d43c2bb4d7 | ||
|
|
68c8504ac7 | ||
|
|
523dcd6219 | ||
|
|
5ebe95e3d6 | ||
|
|
527963b7f4 | ||
|
|
afcc02882d | ||
|
|
f4748f7088 | ||
|
|
36d766d3d9 | ||
|
|
96188a38b4 | ||
|
|
8cfa3bbe94 | ||
|
|
0d97f543df | ||
|
|
be7099b2b4 | ||
|
|
ab6e8291fe | ||
|
|
0839c532bc | ||
|
|
5ef206a666 | ||
|
|
fce92115a9 | ||
|
|
9743002edf | ||
|
|
0efde7b5ce | ||
|
|
8bdaecbe25 | ||
|
|
deb90abd9c |
10
.github/workflows/integrationci.yaml
vendored
10
.github/workflows/integrationci.yaml
vendored
@@ -25,11 +25,11 @@ jobs:
|
||||
uses: astral-sh/setup-uv@v4
|
||||
- name: install
|
||||
run: |
|
||||
cd tests/integration && uv sync
|
||||
cd tests && uv sync
|
||||
- name: fmt
|
||||
run: |
|
||||
make py-fmt
|
||||
git diff --exit-code -- tests/integration/
|
||||
git diff --exit-code -- tests/
|
||||
- name: lint
|
||||
run: |
|
||||
make py-lint
|
||||
@@ -79,7 +79,7 @@ jobs:
|
||||
uses: astral-sh/setup-uv@v4
|
||||
- name: install
|
||||
run: |
|
||||
cd tests/integration && uv sync
|
||||
cd tests && uv sync
|
||||
- name: webdriver
|
||||
run: |
|
||||
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
|
||||
@@ -99,10 +99,10 @@ jobs:
|
||||
google-chrome-stable --version
|
||||
- name: run
|
||||
run: |
|
||||
cd tests/integration && \
|
||||
cd tests && \
|
||||
uv run pytest \
|
||||
--basetemp=./tmp/ \
|
||||
src/${{matrix.src}} \
|
||||
integration/src/${{matrix.src}} \
|
||||
--sqlstore-provider ${{matrix.sqlstore-provider}} \
|
||||
--sqlite-mode ${{matrix.sqlite-mode}} \
|
||||
--postgres-version ${{matrix.postgres-version}} \
|
||||
|
||||
22
Makefile
22
Makefile
@@ -201,26 +201,26 @@ docker-buildx-enterprise: go-build-enterprise js-build
|
||||
# python commands
|
||||
##############################################################
|
||||
.PHONY: py-fmt
|
||||
py-fmt: ## Run black for integration tests
|
||||
@cd tests/integration && uv run black .
|
||||
py-fmt: ## Run black across the shared tests project
|
||||
@cd tests && uv run black .
|
||||
|
||||
.PHONY: py-lint
|
||||
py-lint: ## Run lint for integration tests
|
||||
@cd tests/integration && uv run isort .
|
||||
@cd tests/integration && uv run autoflake .
|
||||
@cd tests/integration && uv run pylint .
|
||||
py-lint: ## Run lint across the shared tests project
|
||||
@cd tests && uv run isort .
|
||||
@cd tests && uv run autoflake .
|
||||
@cd tests && uv run pylint .
|
||||
|
||||
.PHONY: py-test-setup
|
||||
py-test-setup: ## Runs integration tests
|
||||
@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --reuse --capture=no src/bootstrap/setup.py::test_setup
|
||||
py-test-setup: ## Bring up the shared SigNoz backend used by integration and e2e tests
|
||||
@cd tests && uv run pytest --basetemp=./tmp/ -vv --reuse --capture=no integration/bootstrap/setup.py::test_setup
|
||||
|
||||
.PHONY: py-test-teardown
|
||||
py-test-teardown: ## Runs integration tests with teardown
|
||||
@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --teardown --capture=no src/bootstrap/setup.py::test_teardown
|
||||
py-test-teardown: ## Tear down the shared SigNoz backend
|
||||
@cd tests && uv run pytest --basetemp=./tmp/ -vv --teardown --capture=no integration/bootstrap/setup.py::test_teardown
|
||||
|
||||
.PHONY: py-test
|
||||
py-test: ## Runs integration tests
|
||||
@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --capture=no src/
|
||||
@cd tests && uv run pytest --basetemp=./tmp/ -vv --capture=no integration/tests/
|
||||
|
||||
.PHONY: py-clean
|
||||
py-clean: ## Clear all pycache and pytest cache from tests directory recursively
|
||||
|
||||
171
docs/contributing/tests/e2e.md
Normal file
171
docs/contributing/tests/e2e.md
Normal file
@@ -0,0 +1,171 @@
|
||||
# E2E tests
|
||||
|
||||
Playwright-based end-to-end suite for the SigNoz frontend. Wired into the
|
||||
shared pytest project at `tests/` — pytest fixtures bring up a containerized
|
||||
backend (ClickHouse + Postgres + migrator + SigNoz-with-web), register an
|
||||
admin, and seed dashboards/alerts/telemetry before Playwright runs.
|
||||
|
||||
Source lives at `tests/e2e/`.
|
||||
|
||||
## Layout
|
||||
|
||||
```
|
||||
tests/e2e/
|
||||
bootstrap/
|
||||
setup.py Brings backend + seeder up; writes .env.local
|
||||
run.py One-command entrypoint: subprocesses `yarn test`
|
||||
tests/ Playwright .spec.ts files (per-feature dirs)
|
||||
fixtures/auth.ts authedPage Playwright fixture + ensureLoggedIn helper
|
||||
playwright.config.ts Loads .env (user) + .env.local (generated) via dotenv
|
||||
```
|
||||
|
||||
Each spec owns its own data. Telemetry goes through the seeder
|
||||
(`tests/seeder/`, exposing `/telemetry/{traces,logs,metrics}` POST+DELETE);
|
||||
dashboards, alert rules, and org config go through the SigNoz REST API
|
||||
directly from the spec. No global pre-seeding fixtures.
|
||||
|
||||
## Running
|
||||
|
||||
### One-command local run
|
||||
|
||||
Pytest owns the lifecycle: provisions containers, registers the admin,
|
||||
starts the seeder, writes backend coordinates to `tests/e2e/.env.local`
|
||||
(loaded by `playwright.config.ts` via dotenv), then shells out to
|
||||
`yarn test`:
|
||||
|
||||
```bash
|
||||
cd signoz/tests
|
||||
uv sync # first time only
|
||||
uv run pytest --basetemp=./tmp/ -vv --with-web \
|
||||
e2e/bootstrap/run.py::test_e2e
|
||||
```
|
||||
|
||||
### Iterative Playwright development
|
||||
|
||||
Bring the backend up once (`--reuse` keeps containers warm), then drive
|
||||
Playwright directly:
|
||||
|
||||
```bash
|
||||
cd signoz/tests
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse --with-web \
|
||||
e2e/bootstrap/setup.py::test_setup
|
||||
|
||||
cd e2e
|
||||
yarn install && yarn install:browsers # first time
|
||||
yarn test # headless
|
||||
yarn test:ui # interactive
|
||||
yarn test:headed # headed
|
||||
yarn test:debug # step-through
|
||||
yarn test tests/roles/roles-listing.spec.ts # single file
|
||||
```
|
||||
|
||||
Teardown:
|
||||
|
||||
```bash
|
||||
cd signoz/tests
|
||||
uv run pytest --basetemp=./tmp/ -vv --teardown \
|
||||
e2e/bootstrap/setup.py::test_teardown
|
||||
```
|
||||
|
||||
### Staging fallback
|
||||
|
||||
Point `SIGNOZ_E2E_BASE_URL` at a remote env via `.env` — no local
|
||||
backend bring-up, no `.env.local` generated, Playwright hits the URL
|
||||
directly:
|
||||
|
||||
```bash
|
||||
cp .env.example .env # fill SIGNOZ_E2E_USERNAME / PASSWORD
|
||||
yarn test:staging
|
||||
```
|
||||
|
||||
### Environment variables
|
||||
|
||||
| Variable | Description |
|
||||
|---|---|
|
||||
| `SIGNOZ_E2E_BASE_URL` | Base URL (staging mode) |
|
||||
| `SIGNOZ_E2E_USERNAME` | Test user email (staging mode) |
|
||||
| `SIGNOZ_E2E_PASSWORD` | Test user password (staging mode) |
|
||||
|
||||
## Writing tests
|
||||
|
||||
```typescript
|
||||
import { expect, test } from '@playwright/test';
|
||||
import { ensureLoggedIn } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Feature name', () => {
|
||||
test.beforeEach(async ({ page }) => {
|
||||
await ensureLoggedIn(page);
|
||||
await page.goto('/feature');
|
||||
});
|
||||
|
||||
test('Test name', async ({ page }) => {
|
||||
// steps
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
### Locator priority
|
||||
|
||||
1. `getByRole('button', { name: 'Submit' })`
|
||||
2. `getByLabel('Email')`
|
||||
3. `getByPlaceholder('...')`
|
||||
4. `getByText('...')`
|
||||
5. `getByTestId('...')`
|
||||
6. `locator('.ant-select')` — last resort (Ant Design dropdowns often have
|
||||
no semantic alternative)
|
||||
|
||||
### Conventions
|
||||
|
||||
- Unique test data: `` const name = `Test ${Date.now()}`; ``
|
||||
- Prefer explicit waits over `page.waitForTimeout(ms)`:
|
||||
```typescript
|
||||
await expect(page.getByRole('dialog')).toBeVisible(); // good
|
||||
await page.waitForTimeout(5000); // avoid
|
||||
```
|
||||
- Never commit `test.only` or untagged tests.
|
||||
|
||||
## AI-assisted test authoring (optional)
|
||||
|
||||
Playwright's `init-agents` workflow is wired up for Claude Code and VS Code
|
||||
Copilot. Agents live in `tests/e2e/.claude/agents/` and
|
||||
`.github/chatmodes/` respectively. Re-run after each Playwright version
|
||||
upgrade:
|
||||
|
||||
```bash
|
||||
npx playwright init-agents --loop=claude
|
||||
npx playwright init-agents --loop=vscode
|
||||
```
|
||||
|
||||
Three agents:
|
||||
|
||||
| Agent | Input | Output |
|
||||
|---|---|---|
|
||||
| `playwright-test-planner` | URL + seed test | Markdown plan (local scratch) |
|
||||
| `playwright-test-generator` | Plan + seed test | `tests/<feature>/<feature>.spec.ts` (validated live) |
|
||||
| `playwright-test-healer` | Failing spec + error | Patched spec, or `test.fixme()` with a reason |
|
||||
|
||||
Planner output is scratch — the `.spec.ts` is the source of truth. A
|
||||
`specs/` dir is `.gitignore`'d for planner use if you want it.
|
||||
|
||||
### CLI vs MCP
|
||||
|
||||
- **Subagents (MCP)**: use for the bounded plan → generate → heal loop.
|
||||
Token overhead is ~4× CLI but acceptable for structured sessions.
|
||||
- **`playwright-cli` directly**: use for quick locator checks, app
|
||||
exploration, ad-hoc debugging. Saves snapshots to `.playwright-cli/`
|
||||
instead of streaming into the LLM context window (~4× fewer tokens).
|
||||
|
||||
```bash
|
||||
playwright-cli open https://app.us.staging.signoz.cloud
|
||||
playwright-cli snapshot # element refs e1, e2, ...
|
||||
playwright-cli fill e5 "term"
|
||||
playwright-cli click e12
|
||||
playwright-cli screenshot
|
||||
playwright-cli console # errors
|
||||
playwright-cli network # requests
|
||||
playwright-cli state-save .playwright-cli/auth.json
|
||||
playwright-cli close
|
||||
```
|
||||
|
||||
For running and debugging test files, `yarn test:debug` / `yarn test:ui` /
|
||||
`yarn codegen` are faster than MCP for simple cases.
|
||||
65
docs/contributing/tests/integration.md
Normal file
65
docs/contributing/tests/integration.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Integration tests
|
||||
|
||||
Backend integration tests run against a containerized SigNoz stack brought
|
||||
up by pytest fixtures. Live under `tests/integration/`.
|
||||
|
||||
## Layout
|
||||
|
||||
```
|
||||
tests/integration/
|
||||
bootstrap/setup.py Stack lifecycle entrypoint (test_setup, test_teardown)
|
||||
tests/ Suites, one dir per feature area
|
||||
<suite>/ e.g. alerts, dashboard, querier, role, ...
|
||||
NN_<name>.py Numbered test files (collected in order)
|
||||
testdata/ JSON / JSONL / YAML data keyed by suite
|
||||
```
|
||||
|
||||
## Running
|
||||
|
||||
From `signoz/`:
|
||||
|
||||
```bash
|
||||
make py-test-setup # warm up stack (keeps containers under --reuse)
|
||||
make py-test # run all integration suites
|
||||
make py-test-teardown # free containers
|
||||
```
|
||||
|
||||
From `signoz/tests/`:
|
||||
|
||||
```bash
|
||||
uv sync # first time only
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse integration/bootstrap/setup.py::test_setup
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse integration/tests/<suite>/<file>.py
|
||||
```
|
||||
|
||||
Always pass `--reuse` — without it, pytest recreates containers on every
|
||||
invocation.
|
||||
|
||||
## Conventions
|
||||
|
||||
- **Filenames**: `NN_<snake_name>.py` (e.g. `01_register.py`). The numeric
|
||||
prefix orders execution within a suite.
|
||||
- **Suite directory**: one dir per feature area under `tests/`. Optionally
|
||||
`<suite>/conftest.py` for suite-local fixtures.
|
||||
- **Fixtures**: shared ones live in `tests/fixtures/` (registered via
|
||||
`tests/conftest.py`'s `pytest_plugins`). Reuse before adding new.
|
||||
- **Data**: test inputs / expected outputs live in `testdata/<suite>/`.
|
||||
Load via `fixtures.fs.get_testdata_file_path`.
|
||||
- **Style**: black + pylint via `make py-fmt` and `make py-lint` before
|
||||
committing (run from repo root).
|
||||
|
||||
## Adding a suite
|
||||
|
||||
1. Create `tests/integration/tests/<suite>/` with an empty `__init__.py`.
|
||||
2. Add `01_<entry>.py` with `test_<thing>(signoz: types.SigNoz)` functions.
|
||||
3. Import shared fixtures directly (e.g.
|
||||
`from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD`).
|
||||
4. If the suite needs bespoke setup, add `conftest.py` alongside the tests.
|
||||
5. Put any test data under `testdata/<suite>/`.
|
||||
|
||||
Running a single test while iterating:
|
||||
|
||||
```bash
|
||||
uv run pytest --basetemp=./tmp/ -vv --reuse \
|
||||
integration/tests/<suite>/<file>.py::test_<name>
|
||||
```
|
||||
19
tests/.dockerignore
Normal file
19
tests/.dockerignore
Normal file
@@ -0,0 +1,19 @@
|
||||
# Build context for tests/Dockerfile.seeder. Keep the context lean — the
|
||||
# seeder image only needs fixtures/ to be importable alongside seeder/,
|
||||
# plus pyproject.toml + uv.lock for dep install.
|
||||
|
||||
.venv
|
||||
.pytest_cache
|
||||
tmp
|
||||
**/__pycache__
|
||||
**/*.pyc
|
||||
|
||||
# e2e Playwright outputs and deps
|
||||
e2e/node_modules
|
||||
e2e/artifacts
|
||||
e2e/.auth
|
||||
e2e/.playwright-cli
|
||||
|
||||
# Integration-side outputs (if any stale dirs remain)
|
||||
integration/tmp
|
||||
integration/testdata
|
||||
35
tests/Dockerfile.seeder
Normal file
35
tests/Dockerfile.seeder
Normal file
@@ -0,0 +1,35 @@
|
||||
# HTTP seeder for Playwright e2e tests. Wraps the direct-ClickHouse-insert
|
||||
# helpers in tests/fixtures/{traces,logs,metrics}.py so a browser test can
|
||||
# seed telemetry with fine-grained control.
|
||||
#
|
||||
# Build context is tests/ (this file sits at its root) so `fixtures/` is
|
||||
# importable inside the image alongside `seeder/`.
|
||||
|
||||
FROM python:3.13-slim
|
||||
|
||||
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends gcc libpq-dev python3-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install project dependencies from the pytest project's pyproject.toml +
|
||||
# uv.lock so the seeder container's Python env matches local dev exactly
|
||||
# (single source of truth for versions; no parallel requirements.txt).
|
||||
# --no-install-project skips building the signoz-tests project itself
|
||||
# (there is no buildable package here — pyproject is used purely for dep
|
||||
# management alongside pythonpath = ["."]).
|
||||
COPY pyproject.toml uv.lock /app/
|
||||
RUN uv sync --frozen --no-install-project --no-dev
|
||||
ENV PATH="/app/.venv/bin:$PATH"
|
||||
|
||||
# Ship the whole fixtures/ package so server.py can `from fixtures.traces
|
||||
# import ...` with the same module path the pytest side uses.
|
||||
COPY fixtures /app/fixtures
|
||||
COPY seeder /app/seeder
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
CMD ["uvicorn", "seeder.server:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||
@@ -17,12 +17,14 @@ pytest_plugins = [
|
||||
"fixtures.traces",
|
||||
"fixtures.metrics",
|
||||
"fixtures.meter",
|
||||
"fixtures.driver",
|
||||
"fixtures.browser",
|
||||
"fixtures.keycloak",
|
||||
"fixtures.idp",
|
||||
"fixtures.idputils",
|
||||
"fixtures.notification_channel",
|
||||
"fixtures.alerts",
|
||||
"fixtures.cloudintegrations",
|
||||
"fixtures.dashboards",
|
||||
"fixtures.seeder",
|
||||
]
|
||||
|
||||
|
||||
15
tests/e2e/.env.example
Normal file
15
tests/e2e/.env.example
Normal file
@@ -0,0 +1,15 @@
|
||||
# Copy this to .env and fill in values for staging-mode runs.
|
||||
#
|
||||
# This file (.env) holds user-provided defaults — staging credentials, role
|
||||
# override. It is loaded by playwright.config.ts via dotenv.
|
||||
#
|
||||
# Local-mode runs (`cd tests && uv run pytest ... e2e/bootstrap/setup.py::test_setup`)
|
||||
# bring up a containerized backend and write .env.local, which overrides .env.
|
||||
# You do NOT need to touch this file for local mode.
|
||||
|
||||
# Staging base URL (set to opt out of local backend bring-up)
|
||||
SIGNOZ_E2E_BASE_URL=https://app.us.staging.signoz.cloud
|
||||
|
||||
# Test credentials (required only when SIGNOZ_E2E_BASE_URL is set — i.e. staging mode)
|
||||
SIGNOZ_E2E_USERNAME=
|
||||
SIGNOZ_E2E_PASSWORD=
|
||||
38
tests/e2e/.eslintignore
Normal file
38
tests/e2e/.eslintignore
Normal file
@@ -0,0 +1,38 @@
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
|
||||
# Test results
|
||||
test-results/
|
||||
playwright-report/
|
||||
coverage/
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env.production
|
||||
|
||||
# Editor files
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# Runtime data
|
||||
pids
|
||||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
68
tests/e2e/.eslintrc.js
Normal file
68
tests/e2e/.eslintrc.js
Normal file
@@ -0,0 +1,68 @@
|
||||
module.exports = {
|
||||
parser: '@typescript-eslint/parser',
|
||||
parserOptions: {
|
||||
ecmaVersion: 2022,
|
||||
sourceType: 'module',
|
||||
},
|
||||
extends: [
|
||||
'eslint:recommended',
|
||||
'plugin:@typescript-eslint/recommended',
|
||||
'plugin:playwright/recommended',
|
||||
],
|
||||
env: {
|
||||
node: true,
|
||||
es2022: true,
|
||||
},
|
||||
rules: {
|
||||
// Code Quality
|
||||
'@typescript-eslint/no-unused-vars': 'error',
|
||||
'@typescript-eslint/no-explicit-any': 'warn',
|
||||
'prefer-const': 'error',
|
||||
'no-var': 'error',
|
||||
|
||||
// Formatting Rules (ESLint handles formatting)
|
||||
'semi': ['error', 'always'],
|
||||
'quotes': ['error', 'single', { avoidEscape: true }],
|
||||
'comma-dangle': ['error', 'always-multiline'],
|
||||
'indent': ['error', 2, { SwitchCase: 1 }],
|
||||
'object-curly-spacing': ['error', 'always'],
|
||||
'array-bracket-spacing': ['error', 'never'],
|
||||
'space-before-function-paren': ['error', {
|
||||
anonymous: 'always',
|
||||
named: 'never',
|
||||
asyncArrow: 'always',
|
||||
}],
|
||||
'keyword-spacing': 'error',
|
||||
'space-infix-ops': 'error',
|
||||
'eol-last': 'error',
|
||||
'no-trailing-spaces': 'error',
|
||||
'no-multiple-empty-lines': ['error', { max: 2, maxEOF: 1 }],
|
||||
|
||||
// Playwright-specific (enhanced)
|
||||
'playwright/expect-expect': 'error',
|
||||
'playwright/no-conditional-in-test': 'error',
|
||||
'playwright/no-page-pause': 'error',
|
||||
'playwright/no-wait-for-timeout': 'warn',
|
||||
'playwright/prefer-web-first-assertions': 'error',
|
||||
|
||||
// Console usage
|
||||
'no-console': ['warn', { allow: ['warn', 'error'] }],
|
||||
},
|
||||
overrides: [
|
||||
{
|
||||
// Config files can use console and have relaxed formatting
|
||||
files: ['*.config.{js,ts}', 'playwright.config.ts'],
|
||||
rules: {
|
||||
'no-console': 'off',
|
||||
'@typescript-eslint/no-explicit-any': 'off',
|
||||
},
|
||||
},
|
||||
{
|
||||
// Test files specific rules
|
||||
files: ['**/*.spec.ts', '**/*.test.ts'],
|
||||
rules: {
|
||||
'@typescript-eslint/no-explicit-any': 'off', // Page objects often need any
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
24
tests/e2e/.gitignore
vendored
Normal file
24
tests/e2e/.gitignore
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
node_modules/
|
||||
# All Playwright output — HTML report, JSON summary, per-test traces /
|
||||
# screenshots / videos. Set via outputDir + reporter paths in playwright.config.ts.
|
||||
/artifacts/
|
||||
/playwright/.cache/
|
||||
.env
|
||||
dist/
|
||||
*.log
|
||||
yarn-error.log
|
||||
.yarn/cache
|
||||
.yarn/install-state.gz
|
||||
.vscode/
|
||||
|
||||
# playwright-cli artifacts (snapshots, screenshots, videos, traces)
|
||||
.playwright-cli/
|
||||
|
||||
# backend coordinates written by the pytest bootstrap (bootstrap/setup.py);
|
||||
# loaded by playwright.config.ts via dotenv override.
|
||||
.env.local
|
||||
|
||||
# AI test-planner scratch (playwright-test-planner writes markdown plans
|
||||
# here before the generator turns them into .spec.ts files; the tests are
|
||||
# the source of truth, plans are regenerable).
|
||||
specs/
|
||||
30
tests/e2e/.prettierignore
Normal file
30
tests/e2e/.prettierignore
Normal file
@@ -0,0 +1,30 @@
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Generated test outputs
|
||||
artifacts/
|
||||
playwright/.cache/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env*.local
|
||||
|
||||
# Lock files
|
||||
yarn.lock
|
||||
package-lock.json
|
||||
pnpm-lock.yaml
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
yarn-error.log
|
||||
|
||||
# IDE
|
||||
.vscode/
|
||||
.idea/
|
||||
|
||||
# Other
|
||||
.DS_Store
|
||||
6
tests/e2e/.prettierrc.json
Normal file
6
tests/e2e/.prettierrc.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"useTabs": false,
|
||||
"tabWidth": 2,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "all"
|
||||
}
|
||||
36
tests/e2e/bootstrap/run.py
Normal file
36
tests/e2e/bootstrap/run.py
Normal file
@@ -0,0 +1,36 @@
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
|
||||
|
||||
def test_e2e(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
seeder: types.TestContainerDocker,
|
||||
) -> None:
|
||||
"""
|
||||
One-command e2e: pytest brings up the backend and starts the seeder
|
||||
container, then shells out to `yarn test` so Playwright runs against
|
||||
the provisioned instance. Each spec owns its own data via the seeder.
|
||||
Intended as the primary CI entrypoint.
|
||||
"""
|
||||
e2e_dir = Path(__file__).resolve().parents[1] # bootstrap/ -> e2e/
|
||||
host_cfg = signoz.self.host_configs["8080"]
|
||||
seeder_cfg = seeder.host_configs["8080"]
|
||||
env = {
|
||||
**os.environ,
|
||||
"SIGNOZ_E2E_BASE_URL": host_cfg.base(),
|
||||
"SIGNOZ_E2E_USERNAME": USER_ADMIN_EMAIL,
|
||||
"SIGNOZ_E2E_PASSWORD": USER_ADMIN_PASSWORD,
|
||||
"SIGNOZ_E2E_SEEDER_URL": seeder_cfg.base(),
|
||||
}
|
||||
result = subprocess.run(
|
||||
["yarn", "test"],
|
||||
cwd=str(e2e_dir),
|
||||
env=env,
|
||||
check=False,
|
||||
)
|
||||
assert result.returncode == 0, f"Playwright exited with code {result.returncode}"
|
||||
48
tests/e2e/bootstrap/setup.py
Normal file
48
tests/e2e/bootstrap/setup.py
Normal file
@@ -0,0 +1,48 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
|
||||
|
||||
def _env_file() -> Path:
|
||||
override = os.environ.get("SIGNOZ_E2E_ENV_FILE")
|
||||
if override:
|
||||
return Path(override)
|
||||
# tests/e2e/bootstrap/setup.py -> tests/e2e/.env.local
|
||||
return Path(__file__).resolve().parents[1] / ".env.local"
|
||||
|
||||
|
||||
def test_setup(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
apply_license: types.Operation, # pylint: disable=unused-argument
|
||||
seeder: types.TestContainerDocker,
|
||||
) -> None:
|
||||
"""
|
||||
Bring the SigNoz backend up, register the admin, start the HTTP seeder
|
||||
container, and persist endpoint coordinates for the Playwright side as
|
||||
.env.local (loaded by playwright.config.ts via dotenv; overrides the
|
||||
user-owned .env). Each spec owns its own data via the seeder — no
|
||||
global pre-seed here.
|
||||
"""
|
||||
host_cfg = signoz.self.host_configs["8080"]
|
||||
seeder_cfg = seeder.host_configs["8080"]
|
||||
out = _env_file()
|
||||
out.parent.mkdir(parents=True, exist_ok=True)
|
||||
out.write_text(
|
||||
"# Generated by tests/e2e/bootstrap/setup.py — do not edit.\n"
|
||||
f"SIGNOZ_E2E_BASE_URL={host_cfg.base()}\n"
|
||||
f"SIGNOZ_E2E_USERNAME={USER_ADMIN_EMAIL}\n"
|
||||
f"SIGNOZ_E2E_PASSWORD={USER_ADMIN_PASSWORD}\n"
|
||||
f"SIGNOZ_E2E_SEEDER_URL={seeder_cfg.base()}\n"
|
||||
)
|
||||
|
||||
|
||||
def test_teardown(
|
||||
signoz: types.SigNoz, # pylint: disable=unused-argument
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument
|
||||
apply_license: types.Operation, # pylint: disable=unused-argument
|
||||
seeder: types.TestContainerDocker, # pylint: disable=unused-argument
|
||||
) -> None:
|
||||
"""Companion to test_setup — invoked with --teardown to free containers."""
|
||||
85
tests/e2e/fixtures/auth.ts
Normal file
85
tests/e2e/fixtures/auth.ts
Normal file
@@ -0,0 +1,85 @@
|
||||
import {
|
||||
test as base,
|
||||
expect,
|
||||
type Browser,
|
||||
type BrowserContext,
|
||||
type Page,
|
||||
} from '@playwright/test';
|
||||
|
||||
export type User = { email: string; password: string };
|
||||
|
||||
// Default user — admin from the pytest bootstrap (.env.local) or staging .env.
|
||||
export const ADMIN: User = {
|
||||
email: process.env.SIGNOZ_E2E_USERNAME!,
|
||||
password: process.env.SIGNOZ_E2E_PASSWORD!,
|
||||
};
|
||||
|
||||
// Per-worker storageState cache. One login per unique user per worker.
|
||||
// Promise-valued so concurrent requests share the same in-flight work.
|
||||
// Held in memory only — no .auth/ dir, no JSON on disk.
|
||||
type StorageState = Awaited<ReturnType<BrowserContext['storageState']>>;
|
||||
const storageByUser = new Map<string, Promise<StorageState>>();
|
||||
|
||||
async function storageFor(browser: Browser, user: User): Promise<StorageState> {
|
||||
const cached = storageByUser.get(user.email);
|
||||
if (cached) return cached;
|
||||
|
||||
const task = (async () => {
|
||||
const ctx = await browser.newContext();
|
||||
const page = await ctx.newPage();
|
||||
await login(page, user);
|
||||
const state = await ctx.storageState();
|
||||
await ctx.close();
|
||||
return state;
|
||||
})();
|
||||
|
||||
storageByUser.set(user.email, task);
|
||||
return task;
|
||||
}
|
||||
|
||||
async function login(page: Page, user: User): Promise<void> {
|
||||
if (!user.email || !user.password) {
|
||||
throw new Error(
|
||||
'User credentials missing. Set SIGNOZ_E2E_USERNAME / SIGNOZ_E2E_PASSWORD ' +
|
||||
'(pytest bootstrap writes them to .env.local), or pass a User via test.use({ user: ... }).',
|
||||
);
|
||||
}
|
||||
await page.goto('/login?password=Y');
|
||||
await page.getByTestId('email').fill(user.email);
|
||||
await page.getByTestId('initiate_login').click();
|
||||
await page.getByTestId('password').fill(user.password);
|
||||
await page.getByRole('button', { name: 'Sign in with Password' }).click();
|
||||
// Post-login lands somewhere different depending on whether the org is
|
||||
// licensed (onboarding flow on ENTERPRISE) or not (legacy "Hello there"
|
||||
// welcome). Wait for URL to move off /login — whichever page follows
|
||||
// is fine, each spec navigates to the feature under test anyway.
|
||||
await page.waitForURL((url) => !url.pathname.startsWith('/login'));
|
||||
}
|
||||
|
||||
export const test = base.extend<{
|
||||
/**
|
||||
* User identity for this test. Override with `test.use({ user: ... })` at
|
||||
* the describe or test level to run the suite as a different user.
|
||||
* Defaults to ADMIN (the pytest-bootstrap-seeded admin).
|
||||
*/
|
||||
user: User;
|
||||
|
||||
/**
|
||||
* A Page whose context is already authenticated as `user`. First request
|
||||
* for a given user triggers one login per worker; the resulting
|
||||
* storageState is held in memory and reused for all later requests.
|
||||
*/
|
||||
authedPage: Page;
|
||||
}>({
|
||||
user: [ADMIN, { option: true }],
|
||||
|
||||
authedPage: async ({ browser, user }, use) => {
|
||||
const storageState = await storageFor(browser, user);
|
||||
const ctx = await browser.newContext({ storageState });
|
||||
const page = await ctx.newPage();
|
||||
await use(page);
|
||||
await ctx.close();
|
||||
},
|
||||
});
|
||||
|
||||
export { expect };
|
||||
736
tests/e2e/legacy/alerts/alerts-downtime.spec.ts
Normal file
736
tests/e2e/legacy/alerts/alerts-downtime.spec.ts
Normal file
@@ -0,0 +1,736 @@
|
||||
// Playwright replay of platform-pod/issues/2095 alerts + planned-downtime
|
||||
// regression suite. Derived from run-3 selectors.
|
||||
//
|
||||
// Run: yarn test tests/alerts-downtime/alerts-downtime.spec.ts
|
||||
// baseURL + storageState come from playwright.config.ts; env is populated by
|
||||
// the pytest bootstrap (or .env for staging mode). The 2095 flows mutate
|
||||
// shared tenant state, so run them serially regardless of config-level
|
||||
// fullyParallel.
|
||||
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
const E2E_TAG = `e2e-2095-${Math.floor(Date.now() / 1000)}`;
|
||||
|
||||
test.describe('SUITE.md — platform-pod/issues/2095 regression', () => {
|
||||
// Serial: 2095 flows mutate shared tenant state (one flow's rules show up in
|
||||
// another flow's list; toasts from test A block clicks in test B).
|
||||
test.describe.configure({ mode: 'serial' });
|
||||
|
||||
test('Flow 1 — alerts list, toggle, delete (depends on Flow 2 create)', async ({ authedPage: page }) => {
|
||||
// Seed: create a rule via the list's 'New Alert Rule' flow.
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
|
||||
// Seed via direct fetch — UI metric/channel pickers are unreliable from the CLI too
|
||||
// (Ant Select onChange is brittle under test-runner speed). Same pattern as Flow 5.
|
||||
const seedId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: 'spec.ts flow-1', summary: 'spec.ts flow-1' },
|
||||
labels: {},
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-1', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`flow-1 seed POST: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-create` });
|
||||
void seedId; // rule id not needed for UI assertions below
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
|
||||
// Open action menu
|
||||
await page.locator('tbody tr', { hasText: `${E2E_TAG}-create` }).locator('.ant-dropdown-trigger, .dropdown-button').click();
|
||||
await expect(page.getByRole('menuitem', { name: /^disable$/i })).toBeVisible();
|
||||
|
||||
// Disable
|
||||
await page.getByRole('menuitem', { name: /^disable$/i }).click();
|
||||
await page.waitForResponse(r => r.url().includes('/api/v2/rules/') && r.request().method() === 'PATCH');
|
||||
await expect(page.locator('tbody tr', { hasText: `${E2E_TAG}-create` })).toContainText(/disabled/i);
|
||||
|
||||
// Enable
|
||||
await page.locator('tbody tr', { hasText: `${E2E_TAG}-create` }).locator('.ant-dropdown-trigger, .dropdown-button').click();
|
||||
await page.getByRole('menuitem', { name: /^enable$/i }).click();
|
||||
await page.waitForResponse(r => r.url().includes('/api/v2/rules/') && r.request().method() === 'PATCH');
|
||||
|
||||
// Delete
|
||||
await page.locator('tbody tr', { hasText: `${E2E_TAG}-create` }).locator('.ant-dropdown-trigger, .dropdown-button').click();
|
||||
await page.getByRole('menuitem', { name: /^delete$/i }).click();
|
||||
await page.waitForResponse(r => r.url().includes('/api/v2/rules/') && r.request().method() === 'DELETE');
|
||||
// Assert the specific E2E_TAG row is gone. A tenant-wide "no alert rules yet"
|
||||
// assertion is unreliable because other tests / leftover rules may coexist.
|
||||
await expect(page.locator('tbody tr', { hasText: `${E2E_TAG}-create` })).toHaveCount(0);
|
||||
});
|
||||
|
||||
test('Flow 2 — create, edit, clone, labels round-trip', async ({ authedPage: page }) => {
|
||||
// Navigate to establish the origin for localStorage/cookies before direct-fetch.
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
|
||||
// 2.8 — create with labels via direct fetch (metric/channel UI pickers are too brittle
|
||||
// in sequential CLI runs for load-bearing creates). We assert on the BE roundtrip.
|
||||
const labeledId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: `${name}-desc`, summary: `${name}-summary` },
|
||||
labels: { env: 'prod', severity: 'warn' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-2', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`flow-2 labels POST: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-labels` });
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
await expect(page.getByText(`${E2E_TAG}-labels`)).toBeVisible();
|
||||
|
||||
// 2.9 — hydration: visit the overview URL directly and confirm label pills render.
|
||||
await page.goto(`/alerts/overview?ruleId=${labeledId}`);
|
||||
await expect(page.getByTestId(/label-pill-env-prod/)).toBeVisible();
|
||||
await expect(page.getByTestId(/label-pill-severity-warn/)).toBeVisible();
|
||||
|
||||
// 2.10 — remove severity label via PUT (bypasses label-input remove-button UI which
|
||||
// relies on a testid that may not be present in edit mode across all versions).
|
||||
await page.evaluate(async ({ id, name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: `${name}-desc`, summary: `${name}-summary` },
|
||||
labels: { env: 'prod' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-2', version: 'v5',
|
||||
};
|
||||
await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
}, { id: labeledId, name: `${E2E_TAG}-labels` });
|
||||
await page.goto(`/alerts/overview?ruleId=${labeledId}`);
|
||||
await expect(page.getByTestId(/label-pill-env-prod/)).toBeVisible();
|
||||
await expect(page.getByTestId(/label-pill-severity-warn/)).toHaveCount(0);
|
||||
|
||||
// Cleanup
|
||||
await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
}, { id: labeledId });
|
||||
});
|
||||
|
||||
test('Flow 2 — Test Notification (2.11 success, 2.12 empty-result, 2.13 disabled-while-invalid)', async ({ authedPage: page }) => {
|
||||
await page.goto(`/alerts/new`);
|
||||
|
||||
// 2.13 disabled pre-state — fresh form, no name, no metric
|
||||
const testBtn = page.getByRole('button', { name: /test notification/i });
|
||||
await expect(testBtn).toBeDisabled();
|
||||
|
||||
// 2.11 / 2.12 — direct-fetch POST /api/v2/rules/test. Driving the V2 form's metric +
|
||||
// channel pickers via CLI is brittle (Ant Select onChange behavior varies); the API
|
||||
// contract is what matters for this flow's regression probe. UI-driven enable-after-fill
|
||||
// for 2.13 is covered via run-5's interactive replay.
|
||||
const buildTestBody = (target: number) => ({
|
||||
alert: `${E2E_TAG}-test-notif`,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: `${E2E_TAG}-test-notif`, summary: `${E2E_TAG}-test-notif` },
|
||||
labels: {},
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-2-test-notif', version: 'v5',
|
||||
});
|
||||
|
||||
const body211 = await page.evaluate(async (body) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch('/api/v2/rules/test', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return { status: resp.status, body: await resp.json() };
|
||||
}, buildTestBody(0));
|
||||
expect(body211.status).toBe(200);
|
||||
expect(body211.body.data.alertCount).toBeGreaterThan(0);
|
||||
|
||||
const body212 = await page.evaluate(async (body) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch('/api/v2/rules/test', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
return { status: resp.status, body: await resp.json() };
|
||||
}, buildTestBody(1e18));
|
||||
expect(body212.status).toBe(200);
|
||||
// NOTE (run-5 finding): /api/v2/rules/test bypasses threshold evaluation via
|
||||
// WithSendUnmatched() (pkg/query-service/rules/test_notification.go:52-53), so an
|
||||
// unsatisfiable threshold still yields alertCount >= 1. Assert on the contract only.
|
||||
expect(body212.body.data).toHaveProperty('alertCount');
|
||||
});
|
||||
|
||||
test('Flow 3 — alert details and AlertNotFound', async ({ authedPage: page }) => {
|
||||
// Seed via direct fetch (same reasoning as Flow 1/2-main).
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
const ruleId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: 'spec.ts flow-3', summary: 'spec.ts flow-3' },
|
||||
labels: { severity: 'warning' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-3', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`flow-3 seed POST: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-details` });
|
||||
|
||||
// 3.1–3.3 — valid overview + history
|
||||
await page.goto(`/alerts/overview?ruleId=${ruleId}`);
|
||||
await expect(page.locator('.alert-header__input, [data-testid=alert-name-input]')).toBeVisible();
|
||||
await page.getByRole('tab', { name: /history/i }).click();
|
||||
await expect(page.getByText(/total triggered/i)).toBeVisible();
|
||||
|
||||
// 3.4 — bogus UUID
|
||||
await page.goto(`/alerts/overview?ruleId=00000000-0000-0000-0000-000000000000`);
|
||||
await expect(page).toHaveTitle('Alert Not Found');
|
||||
|
||||
// 3.5 — missing ruleId
|
||||
await page.goto(`/alerts/overview`);
|
||||
await expect(page.getByText(/we couldn'?t find/i)).toBeVisible();
|
||||
|
||||
// 3.6 — delete via direct fetch, then revisit
|
||||
await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
await fetch(`/api/v2/rules/${id}`, { method: 'DELETE', headers: { Authorization: `Bearer ${token}` } });
|
||||
}, { id: ruleId });
|
||||
await page.goto(`/alerts/overview?ruleId=${ruleId}`);
|
||||
await expect(page).toHaveTitle('Alert Not Found');
|
||||
});
|
||||
|
||||
test('Flow 4 — planned downtime CRUD', async ({ authedPage: page }) => {
|
||||
// 4.1a — direct URL.
|
||||
// The "no data" copy is tenant-state-dependent; assert the list renders (header row) instead.
|
||||
await page.goto(`/alerts?tab=Configuration&subTab=planned-downtime`);
|
||||
await expect(page.locator('table, .ant-spin').first()).toBeVisible();
|
||||
|
||||
// 4.1b — tab click
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
await page.getByRole('tab', { name: /configuration/i }).click();
|
||||
await expect(page.locator('table, .ant-spin').first()).toBeVisible();
|
||||
|
||||
// 4.3 — empty-form validation (click Add with just the name)
|
||||
await page.getByRole('button', { name: /new downtime/i }).click();
|
||||
await page.locator('#create-form_name').fill(`${E2E_TAG}-downtime-once`);
|
||||
await page.getByRole('button', { name: /add downtime schedule/i }).click();
|
||||
await expect(page.getByText(/please enter ends on/i)).toBeVisible();
|
||||
|
||||
// 4.2 — create via direct fetch.
|
||||
// The Ant DatePicker calendar-cell clicks are unreliable (cells-in-view index varies
|
||||
// across months; title-based selectors require tomorrow's date to be computed in the
|
||||
// displayed timezone). The 2095 refactor doesn't touch the DatePicker logic; UI-probing
|
||||
// this step adds flakiness without improving coverage. We skip the calendar UI and
|
||||
// POST directly. The list assertions below still verify the BE roundtrip.
|
||||
await page.keyboard.press('Escape');
|
||||
const downtimeId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const now = Date.now();
|
||||
const body = {
|
||||
name,
|
||||
description: 'spec.ts downtime',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
};
|
||||
const resp = await fetch('/api/v1/downtime_schedules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status >= 300) throw new Error(`POST /downtime_schedules: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data?.id ?? json.id;
|
||||
}, { name: `${E2E_TAG}-downtime-once` });
|
||||
await page.goto(`/alerts?tab=Configuration&subTab=planned-downtime`);
|
||||
// The downtime list uses accordion/card layout, not a real <tr>. Assert by visible text.
|
||||
await expect(page.getByText(`${E2E_TAG}-downtime-once`)).toBeVisible();
|
||||
|
||||
// 4.4 — edit via direct fetch (same reasoning as 4.2: the pencil icon is a lucide SVG
|
||||
// that historically requires DOM injection to be reliably clickable — run-4 documented
|
||||
// this. UI-probing adds flake without covering 2095 refactor scope).
|
||||
await page.evaluate(async ({ id, name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const now = Date.now();
|
||||
const body = {
|
||||
name,
|
||||
description: 'spec.ts downtime edited',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
};
|
||||
const resp = await fetch(`/api/v1/downtime_schedules/${id}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
if (resp.status >= 300) {
|
||||
const j = await resp.text();
|
||||
throw new Error(`PUT /downtime_schedules: ${resp.status} ${j}`);
|
||||
}
|
||||
}, { id: downtimeId, name: `${E2E_TAG}-downtime-edited` });
|
||||
await page.reload();
|
||||
await expect(page.getByText(`${E2E_TAG}-downtime-edited`)).toBeVisible();
|
||||
|
||||
// 4.5 — delete via direct fetch; verify UI reflects the delete.
|
||||
await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch(`/api/v1/downtime_schedules/${id}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
if (resp.status >= 300) throw new Error(`DELETE /downtime_schedules: ${resp.status}`);
|
||||
}, { id: downtimeId });
|
||||
await page.reload();
|
||||
await expect(page.getByText(`${E2E_TAG}-downtime-edited`)).toHaveCount(0);
|
||||
});
|
||||
|
||||
test('Flow 6 — anomaly alerts (6.1 type-selection, 6.2 classic-form entry, 6.4 create, 6.5 edit z-score, 6.6 toggle, 6.7 delete, 6.8 AlertNotFound)', async ({ authedPage: page }) => {
|
||||
// 6.1 — type-selection page
|
||||
await page.goto(`/alerts/type-selection`);
|
||||
const anomalyCard = page.getByTestId('alert-type-card-ANOMALY_BASED_ALERT');
|
||||
await expect(anomalyCard).toBeVisible();
|
||||
await expect(anomalyCard.getByText('Beta')).toBeVisible();
|
||||
|
||||
// 6.2 — click Anomaly card → classic form with anomaly tab selected
|
||||
await anomalyCard.click();
|
||||
await page.waitForURL(/ruleType=anomaly_rule.*alertType=METRIC_BASED_ALERT/);
|
||||
const anomalyTabBtn = page.locator('button[value="anomaly_rule"]');
|
||||
await expect(anomalyTabBtn).toHaveClass(/selected/);
|
||||
// Confirm classic, not V2
|
||||
expect(await page.locator('.create-alert-v2-footer').count()).toBe(0);
|
||||
|
||||
// 6.4 — create via direct fetch (UI Ant Select metric/channel pickers are unreliable from MCP).
|
||||
// Pre-convert namedArgs → args:[{name,value}] because v5 builder spec rejects namedArgs.
|
||||
const ruleId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'anomaly_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 3, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder',
|
||||
panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 3 }] }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
algorithm: 'standard',
|
||||
seasonality: 'hourly',
|
||||
},
|
||||
annotations: { description: 'spec.ts anomaly', summary: 'spec.ts anomaly' },
|
||||
labels: { severity: 'warning' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1',
|
||||
source: 'spec.ts-flow-6',
|
||||
version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`POST /api/v2/rules failed: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-anomaly` });
|
||||
|
||||
// 6.5 — PUT z-score 3→5
|
||||
await page.evaluate(async ({ id, name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'anomaly_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 5, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 5 }] }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
algorithm: 'standard',
|
||||
seasonality: 'hourly',
|
||||
},
|
||||
annotations: { description: 'spec.ts anomaly', summary: 'spec.ts anomaly' },
|
||||
labels: { severity: 'warning' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-6', version: 'v5',
|
||||
};
|
||||
const resp = await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
if (resp.status !== 204) throw new Error(`PUT /api/v2/rules/${id} failed: ${resp.status}`);
|
||||
}, { id: ruleId, name: `${E2E_TAG}-anomaly` });
|
||||
|
||||
// 6.6 — detection-method toggle is asymmetric: anomaly → threshold transitions classic → V2.
|
||||
// (See run-6 RUN_REPORT.md observation #1. SUITE.md may be amended to reflect this.)
|
||||
await page.goto(`/alerts/new?ruleType=anomaly_rule&alertType=METRIC_BASED_ALERT`);
|
||||
const thresholdTabBtn = page.locator('button[value="threshold_rule"]');
|
||||
await thresholdTabBtn.click();
|
||||
await expect(page).toHaveURL(/ruleType=threshold_rule/);
|
||||
// V2 footer is now present, detection-method tabs are gone — no return path
|
||||
await expect(page.locator('.create-alert-v2-footer')).toBeVisible();
|
||||
expect(await page.locator('button[value="anomaly_rule"]').count()).toBe(0);
|
||||
|
||||
// 6.7 — DELETE
|
||||
await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
if (resp.status !== 204) throw new Error(`DELETE /api/v2/rules/${id} failed: ${resp.status}`);
|
||||
}, { id: ruleId });
|
||||
|
||||
// 6.8 — AlertNotFound for the deleted anomaly rule
|
||||
await page.goto(`/alerts/overview?ruleId=${ruleId}`);
|
||||
await expect(page).toHaveTitle('Alert Not Found');
|
||||
|
||||
// 6.9 — POST /api/v2/rules/test with the anomaly DTO. The classic anomaly form has no
|
||||
// Test Notification button (V2-only feature), so this is a direct-fetch API-contract probe.
|
||||
// Same SendUnmatched bypass as run-5: alertCount: 0 is reachable only via a zero-data query.
|
||||
const test69 = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'anomaly_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 3, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 3 }] }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
algorithm: 'standard',
|
||||
seasonality: 'hourly',
|
||||
},
|
||||
annotations: { description: 'spec.ts anomaly test-notification', summary: 'spec.ts anomaly test-notification' },
|
||||
labels: { severity: 'warning' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-6-step6.9', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules/test', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
return { status: resp.status, body: json };
|
||||
}, { name: `${E2E_TAG}-anomaly-test` });
|
||||
expect(test69.status).toBe(200);
|
||||
expect(test69.body.data).toHaveProperty('alertCount');
|
||||
});
|
||||
|
||||
test('Flow 5 — classic experience + cascade-delete error paths', async ({ authedPage: page }) => {
|
||||
// 5.1 — switch to classic
|
||||
await page.goto(`/alerts/new?showClassicCreateAlertsPage=true&ruleType=threshold_rule`);
|
||||
await expect(page.getByText(/metrics based alert/i)).toBeVisible();
|
||||
|
||||
// 5.2/5.3 — fill + save classic alert.
|
||||
// Classic form uses #alert for the name input (not the V2 data-testid).
|
||||
// Drive via direct fetch for reliability — the classic metric/channel dropdowns are
|
||||
// interactively hard to pick (see run-3 Flow 5 notes). We still verify the UI renders,
|
||||
// then POST the rule, then continue exercising UI for downtime linking and cascade delete.
|
||||
await expect(page.locator('#alert')).toBeVisible();
|
||||
const classicRuleId = await page.evaluate(async ({ name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const body = {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: { kind: 'basic', spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }] },
|
||||
compositeQuery: {
|
||||
queryType: 'builder', panelType: 'graph',
|
||||
queries: [{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A', signal: 'metrics', source: '', stepInterval: null, disabled: false,
|
||||
filter: { expression: '' }, having: { expression: '' },
|
||||
aggregations: [{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' }],
|
||||
},
|
||||
}],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: 'classic e2e', summary: 'classic e2e' },
|
||||
labels: { severity: 'warning' },
|
||||
notificationSettings: { groupBy: [], usePolicy: true, renotify: { enabled: false, interval: '30m', alertStates: [] } },
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1', source: 'spec.ts-flow-5', version: 'v5',
|
||||
};
|
||||
const resp = await fetch('/api/v2/rules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status !== 201) throw new Error(`classic POST /api/v2/rules failed: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data.id as string;
|
||||
}, { name: `${E2E_TAG}-classic` });
|
||||
await page.goto(`/alerts?tab=AlertRules`);
|
||||
const classicRow = page.locator('tbody tr', { hasText: `${E2E_TAG}-classic` });
|
||||
await expect(classicRow).toBeVisible();
|
||||
|
||||
// 5.4 — create downtime linked to the classic alert (direct fetch; see Flow 4 notes).
|
||||
const linkedDowntimeId = await page.evaluate(async ({ name, alertId }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const now = Date.now();
|
||||
const body = {
|
||||
name,
|
||||
description: 'spec.ts linked downtime',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [alertId],
|
||||
};
|
||||
const resp = await fetch('/api/v1/downtime_schedules', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify(body),
|
||||
});
|
||||
const json = await resp.json();
|
||||
if (resp.status >= 300) throw new Error(`linked POST /downtime_schedules: ${resp.status} ${JSON.stringify(json)}`);
|
||||
return json.data?.id ?? json.id;
|
||||
}, { name: `${E2E_TAG}-downtime-linked`, alertId: classicRuleId });
|
||||
await page.goto(`/alerts?tab=Configuration&subTab=planned-downtime`);
|
||||
// Downtime list is accordion/card; assert by visible text, not <tr>.
|
||||
await expect(page.getByText(`${E2E_TAG}-downtime-linked`)).toBeVisible();
|
||||
|
||||
// 5.5 — delete the linked alert: expect 409 `already_exists` from the BE.
|
||||
// We direct-fetch rather than drive the ellipsis-menu → Delete UI so the assertion is
|
||||
// on the actual BE contract (ddb0cb66e: showErrorModal on convertToApiError). The
|
||||
// visual modal/toast UX was verified in run-3's full UI replay.
|
||||
const delRuleResp = await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch(`/api/v2/rules/${id}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
const text = await resp.text();
|
||||
let body: any; try { body = JSON.parse(text); } catch { body = text; }
|
||||
return { status: resp.status, body };
|
||||
}, { id: classicRuleId });
|
||||
expect(delRuleResp.status).toBe(409);
|
||||
expect(delRuleResp.body.error?.code ?? delRuleResp.body.code).toBe('already_exists');
|
||||
expect(delRuleResp.body.error?.message ?? delRuleResp.body.message).toMatch(/cannot delete rule because it is referenced/i);
|
||||
|
||||
// 5.6 — delete the linked downtime: expect 409 with the paired message.
|
||||
const delDtResp = await page.evaluate(async ({ id }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const resp = await fetch(`/api/v1/downtime_schedules/${id}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
const text = await resp.text();
|
||||
let body: any; try { body = JSON.parse(text); } catch { body = text; }
|
||||
return { status: resp.status, body };
|
||||
}, { id: linkedDowntimeId });
|
||||
expect(delDtResp.status).toBe(409);
|
||||
expect(delDtResp.body.error?.code ?? delDtResp.body.code).toBe('already_exists');
|
||||
expect(delDtResp.body.error?.message ?? delDtResp.body.message).toMatch(/cannot delete planned maintenance because it is referenced/i);
|
||||
|
||||
// Cleanup: unlink the downtime (clear alertIds), delete the downtime, delete the rule.
|
||||
await page.evaluate(async ({ dtId, ruleId, name }) => {
|
||||
const token = localStorage.getItem('AUTH_TOKEN');
|
||||
const now = Date.now();
|
||||
// PUT downtime with alertIds: [] to break the cascade constraint
|
||||
await fetch(`/api/v1/downtime_schedules/${dtId}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
|
||||
body: JSON.stringify({
|
||||
name,
|
||||
description: 'spec.ts cleanup — unlinked',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
}),
|
||||
});
|
||||
await fetch(`/api/v1/downtime_schedules/${dtId}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
await fetch(`/api/v2/rules/${ruleId}`, {
|
||||
method: 'DELETE',
|
||||
headers: { Authorization: `Bearer ${token}` },
|
||||
});
|
||||
}, { dtId: linkedDowntimeId, ruleId: classicRuleId, name: `${E2E_TAG}-downtime-linked` });
|
||||
});
|
||||
});
|
||||
490
tests/e2e/legacy/alerts/routing-policies.spec.ts
Normal file
490
tests/e2e/legacy/alerts/routing-policies.spec.ts
Normal file
@@ -0,0 +1,490 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Routing Policies', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
// Login to the application
|
||||
|
||||
// Navigate to Routing Policies through sidebar navigation
|
||||
await page.locator('svg.lucide-bell-dot').click();
|
||||
|
||||
// Navigate to Configuration tab
|
||||
await page.getByRole('tab', { name: 'Configuration' }).click();
|
||||
|
||||
// Navigate to Routing Policies tab
|
||||
await page.getByRole('tab', { name: 'Routing Policies' }).click();
|
||||
});
|
||||
|
||||
test(
|
||||
'Navigate to Routing Policies and verify page layout',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Verify header contains "Routing Policies" title
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Routing Policies' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Verify search functionality is prominently displayed
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await expect(searchBox).toBeVisible();
|
||||
|
||||
// 3. Verify "New routing policy" button with plus icon is visible
|
||||
const newPolicyButton = page.getByRole('button', {
|
||||
name: 'plus New routing policy',
|
||||
});
|
||||
await expect(newPolicyButton).toBeVisible();
|
||||
|
||||
// 4. Verify policy list displays in table format
|
||||
await expect(page.getByRole('table')).toBeVisible();
|
||||
|
||||
// 5. Verify pagination controls are present at bottom
|
||||
await expect(page.getByRole('list')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Create Routing Policies with Basic and Complex Expressions',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Navigate to Routing Policies page (done in beforeEach)
|
||||
|
||||
// 2. Click "New routing policy" button
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 3. Verify "Create routing policy" dialog opens
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Create routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 4. Fill in routing policy name
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Critical Payment Alerts');
|
||||
|
||||
// 5. Fill in description
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Route critical payment service alerts to Slack');
|
||||
|
||||
// 6. Enter expression
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "payment" && severity == "critical"');
|
||||
|
||||
// 7. Select notification channel from dropdown
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
// 8. Click "Save Routing Policy"
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 9. Verify success message appears
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 10. Create second policy with complex expression
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 11. Enter name for complex policy
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Multi-Condition Alert Routing');
|
||||
|
||||
// 12. Enter description for complex policy
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Route alerts based on multiple conditions');
|
||||
|
||||
// 13. Enter complex expression with multiple conditions
|
||||
const complexExpression =
|
||||
'(service.name == "payment" || service.name == "billing") && (severity == "critical" || severity == "high") && region == "us-east-1"';
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill(complexExpression);
|
||||
|
||||
// 14. Select notification channel for complex policy
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
// 15. Save the complex policy
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 16. Verify complex policy saves successfully
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Create Policy with Empty Required Fields',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Click "New routing policy" button
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 2. Wait for dialog to be visible
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Create routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 3. Leave name field empty and fill other fields
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "test"');
|
||||
|
||||
// 4. Select notification channel
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
// 5. Attempt to save without required name
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 6. Wait a moment for validation to trigger
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
// 7. Verify the form doesn't submit and dialog remains open
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Create routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 8. Check that the name field exists and is still empty
|
||||
const nameField = page.getByRole('textbox', {
|
||||
name: 'e.g. Base routing policy...',
|
||||
});
|
||||
|
||||
// 9. Verify the field is still empty (indicating form didn't submit)
|
||||
await expect(nameField).toHaveValue('');
|
||||
|
||||
// 10. Verify the specific error message appears
|
||||
await expect(
|
||||
page.getByText('Please provide a name for the routing policy'),
|
||||
).toBeVisible();
|
||||
|
||||
// 11. Fill the required name field to verify form can now be submitted
|
||||
await nameField.fill('Test Policy Name');
|
||||
|
||||
// 12. Verify error message disappears after filling the field
|
||||
await expect(
|
||||
page.getByText('Please provide a name for the routing policy'),
|
||||
).toBeHidden();
|
||||
|
||||
// 13. Attempt to save again with name filled
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 14. Verify successful creation or that we progress past validation
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Cancel Policy Creation',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Click "New routing policy" button
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
// 2. Fill in some form fields
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Test Policy');
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Test description');
|
||||
|
||||
// 3. Click "Cancel" button
|
||||
await page.getByRole('button', { name: 'Cancel' }).click();
|
||||
|
||||
// 4. Verify dialog closes and returns to main list
|
||||
await expect(page.getByRole('dialog')).toBeHidden();
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Routing Policies' }),
|
||||
).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search Policies by Name',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a test policy first
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill('Searchable Test Policy');
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Policy for search testing');
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "search-test"');
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// Wait for creation success
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Navigate to routing policies page with multiple policies
|
||||
await page.goto(
|
||||
'https://quiet-buffalo.us.staging.signoz.cloud/alerts?tab=Configuration',
|
||||
);
|
||||
await new Promise(f => setTimeout(f, 2000)); // Wait for page load
|
||||
await page.getByRole('tab', { name: 'Routing Policies' }).click();
|
||||
|
||||
// 3. Enter a policy name in the search box
|
||||
await page
|
||||
.getByRole('textbox', { name: 'Search for a routing policy...' })
|
||||
.fill('Searchable Test Policy');
|
||||
|
||||
// 4. Press Enter to execute search
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 5. Verify filtered results show only matching policy
|
||||
await expect(page.getByText('Searchable Test Policy').first()).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Search with No Results',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Enter a search term that matches no policies
|
||||
await page
|
||||
.getByRole('textbox', { name: 'Search for a routing policy...' })
|
||||
.fill('NonExistentPolicyName12345');
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 2. Verify appropriate empty state or no results message
|
||||
// Note: The exact behavior would depend on how the application handles no search results
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await expect(searchBox).toHaveValue('NonExistentPolicyName12345');
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'View Policy Details',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a policy with unique name
|
||||
const uniquePolicyName = `Test Policy ${Date.now()}`;
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill(uniquePolicyName);
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Test description for policy details');
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "test-details"');
|
||||
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Search for the created policy
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 3. Wait for search results and click on the policy to expand it
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
const policyTab = page.getByRole('tab', { name: 'right' }).first();
|
||||
await policyTab.click();
|
||||
|
||||
// 4. Wait for expansion
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
// 5. Verify all field keys are present
|
||||
await expect(page.getByText('Created by')).toBeVisible();
|
||||
await expect(page.getByText('Created on')).toBeVisible();
|
||||
await expect(page.getByText('Updated by')).toBeVisible();
|
||||
await expect(page.getByText('Updated on')).toBeVisible();
|
||||
await expect(page.getByText('Expression')).toBeVisible();
|
||||
await expect(page.getByText('Description', { exact: true })).toBeVisible();
|
||||
await expect(page.getByText('Channels')).toBeVisible();
|
||||
|
||||
// 6. Verify the specific values we created
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
await expect(page.getByText('Test description for policy details')).toBeVisible();
|
||||
await expect(page.getByText('service.name == "test-details"')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Edit Existing Policy',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a policy to edit first
|
||||
const uniquePolicyName = `Policy to Edit ${Date.now()}`;
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill(uniquePolicyName);
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('Original description');
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "original"');
|
||||
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Search for the created policy
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 3. Wait for search results and click on the policy to expand it
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
const policyTab = page.getByRole('tab', { name: 'right' }).first();
|
||||
await policyTab.click();
|
||||
|
||||
// 4. Wait for expansion and click edit button
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
const editButton = page.getByTestId('edit-routing-policy');
|
||||
await editButton.click();
|
||||
|
||||
// 5. Verify edit dialog opens
|
||||
await expect(
|
||||
page.getByRole('dialog', { name: 'Edit routing policy' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 6. Update the title and description
|
||||
const updatedPolicyName = `Updated ${uniquePolicyName}`;
|
||||
const nameField = page.getByRole('textbox', { name: 'e.g. Base routing policy...' });
|
||||
await nameField.clear();
|
||||
await nameField.fill(updatedPolicyName);
|
||||
|
||||
const descriptionField = page.getByRole('textbox', { name: 'e.g. This is a routing policy' });
|
||||
await descriptionField.clear();
|
||||
await descriptionField.fill('Updated description after editing');
|
||||
|
||||
// 7. Save the changes
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
// 8. Verify success toast message appears
|
||||
await expect(
|
||||
page.getByText('Routing policy updated successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 9. Search for the updated policy name to ensure it exists
|
||||
await searchBox.clear();
|
||||
await searchBox.fill(updatedPolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 10. Verify the updated policy is found
|
||||
await expect(page.getByText(updatedPolicyName)).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Delete Routing Policy',
|
||||
async ({ authedPage: page }) => {
|
||||
// 1. Create a policy to delete first
|
||||
const uniquePolicyName = `Policy to Delete ${Date.now()}`;
|
||||
|
||||
await page
|
||||
.getByRole('button', { name: 'plus New routing policy' })
|
||||
.click();
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. Base routing policy...' })
|
||||
.fill(uniquePolicyName);
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. This is a routing policy' })
|
||||
.fill('This policy will be deleted');
|
||||
|
||||
await page
|
||||
.getByRole('textbox', { name: 'e.g. service.name == "payment' })
|
||||
.fill('service.name == "delete-test"');
|
||||
|
||||
await page.locator('.ant-select').click();
|
||||
await page.locator('.ant-select-item').first().click();
|
||||
|
||||
await page.getByRole('button', { name: 'Save Routing Policy' }).click();
|
||||
|
||||
await expect(
|
||||
page.getByText('Routing policy created successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 2. Search for the created policy
|
||||
const searchBox = page.getByRole('textbox', {
|
||||
name: 'Search for a routing policy...',
|
||||
});
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 3. Wait for search results and click on the policy to expand it
|
||||
await expect(page.getByText(uniquePolicyName)).toBeVisible();
|
||||
const policyTab = page.getByRole('tab', { name: 'right' }).first();
|
||||
await policyTab.click();
|
||||
|
||||
// 4. Wait for expansion and click delete button
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
const deleteButton = page.getByTestId('delete-routing-policy');
|
||||
await deleteButton.click();
|
||||
|
||||
// 5. Verify delete confirmation modal opens
|
||||
await expect(
|
||||
page.getByRole('dialog').filter({ hasText: 'Delete' }),
|
||||
).toBeVisible();
|
||||
|
||||
// 6. Click confirm to delete the policy
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
|
||||
// 7. Verify success notification appears
|
||||
await expect(
|
||||
page.getByText('Routing policy deleted successfully'),
|
||||
).toBeVisible();
|
||||
|
||||
// 8. Verify the deleted policy is no longer in the list
|
||||
await searchBox.clear();
|
||||
await searchBox.fill(uniquePolicyName);
|
||||
await page.keyboard.press('Enter');
|
||||
|
||||
// 9. Verify the policy is not found
|
||||
await expect(page.getByText(uniquePolicyName)).toBeHidden();
|
||||
},
|
||||
);
|
||||
});
|
||||
848
tests/e2e/legacy/dashboards/dashboards-list.spec.ts
Normal file
848
tests/e2e/legacy/dashboards/dashboards-list.spec.ts
Normal file
@@ -0,0 +1,848 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Dashboards List Page', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
});
|
||||
|
||||
// ─── 1. Page Load and Layout ──────────────────────────────────────────────
|
||||
//
|
||||
// Verifies the critical chrome of the list page: heading, subtitle, search
|
||||
// input, sort control, at least one dashboard row, pagination, and the
|
||||
// Feedback / Share header buttons. These run as @viewer because they cover
|
||||
// elements visible to every role.
|
||||
|
||||
test('1.1 Dashboard list page loads correctly', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
// Wait for the list label as the reliable "page is ready" signal — it
|
||||
// appears only after the dashboard data has loaded.
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Fresh load should have no query params
|
||||
await expect(page).toHaveURL('/dashboard');
|
||||
await expect(page).toHaveTitle('SigNoz | All Dashboards');
|
||||
|
||||
// Page identity
|
||||
await expect(page.getByRole('heading', { name: 'Dashboards', level: 1 })).toBeVisible();
|
||||
await expect(page.getByText('Create and manage dashboards for your workspace.')).toBeVisible();
|
||||
|
||||
// Core controls
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toBeVisible();
|
||||
await expect(page.getByText('All Dashboards')).toBeVisible();
|
||||
await expect(page.getByTestId('sort-by')).toBeVisible();
|
||||
|
||||
// At least one dashboard row — thumbnail is the most stable row anchor
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
|
||||
// Pagination range text confirms rows were fetched (e.g. "1 — 20 of 42")
|
||||
await expect(page.getByText(/\d+ — \d+ of \d+/)).toBeVisible();
|
||||
|
||||
// Global header actions
|
||||
await expect(page.getByRole('button', { name: 'Feedback' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Share' })).toBeVisible();
|
||||
});
|
||||
|
||||
test('1.2 Dashboard list shows correct data fields per row', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
// Wait until thumbnails are rendered — this confirms row data has arrived
|
||||
await page.getByAltText('dashboard-image').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Each row has a thumbnail image identified by the alt text set by the app
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
|
||||
// Each row shows a "last updated" timestamp — verify the date format
|
||||
// exists somewhere in the rendered list (e.g. "Mar 24, 2026")
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText).toMatch(/\w{3} \d{1,2}, \d{4}/);
|
||||
|
||||
// Each row shows the creator's email address
|
||||
await expect(page.getByText(/@/).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('1.3 Pagination bar shows correct item count', async ({ authedPage: page }) => {
|
||||
// Pre-condition: staging workspace has more than 20 dashboards so the
|
||||
// pagination bar is rendered and Previous is disabled on the first page.
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Range indicator, e.g. "1 — 20 of 42", confirms correct page size
|
||||
await expect(page.getByText(/1\s*—\s*20 of/)).toBeVisible();
|
||||
|
||||
// Previous Page is always disabled on the first page
|
||||
await expect(page.getByRole('button', { name: 'Previous Page' })).toBeDisabled();
|
||||
});
|
||||
|
||||
// ─── 2. Search Functionality ──────────────────────────────────────────────
|
||||
//
|
||||
// The search input filters by title, description, and tags simultaneously.
|
||||
// Results update in real time and the active query is reflected in the URL
|
||||
// as ?search=<term>. All visibility tests run as @viewer; the description
|
||||
// search requires @editor to set up a dashboard with a known description.
|
||||
|
||||
test('2.1 Search by title returns matching dashboards', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// "APM Metrics" is a known dashboard in the workspace — searching by its
|
||||
// exact title should return it and reflect the term in the URL
|
||||
await searchInput.fill('APM Metrics');
|
||||
await expect(page).toHaveURL(/search=APM\+Metrics/);
|
||||
await expect(searchInput).toHaveValue('APM Metrics');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
test('2.2 Search by tag returns dashboards that carry that tag', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// "latency" is a tag on the APM Metrics dashboard — searching by tag value
|
||||
// alone (no title match) should still surface that dashboard
|
||||
await searchInput.fill('latency');
|
||||
await expect(page).toHaveURL(/search=latency/);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
test('2.3 Search by description returns matching dashboards', async ({ authedPage: page }) => {
|
||||
// Create a dashboard with a known, unique description so we have a
|
||||
// reliable target for the description search without relying on pre-existing data
|
||||
const uniqueDesc = `desc-search-${Date.now()}`;
|
||||
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Create via inline name field then set its description via Configure
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(`Search Test ${Date.now()}`);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Set the description in the Configure dialog
|
||||
await page.getByRole('button', { name: 'Configure' }).click();
|
||||
await page.getByRole('dialog').waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: /description/i }).fill(uniqueDesc);
|
||||
await page.getByRole('button', { name: 'Save' }).click();
|
||||
|
||||
// Return to the list and search using the description text
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
await searchInput.fill(uniqueDesc);
|
||||
|
||||
// The dashboard we just created should appear
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('2.4 Dashboard with no tags is found by title search', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// "PromQL and Clickhouse SQL" has no tags — searching its title should
|
||||
// still return it, confirming that tag absence does not break title search
|
||||
await searchInput.fill('PromQL and Clickhouse SQL');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('PROMQL AND CLICKHOUSE SQL');
|
||||
});
|
||||
|
||||
test('2.5 Dashboard with no description is found by title search', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// APM Metrics has no description — searching its title must still return it,
|
||||
// confirming that description absence does not break title search
|
||||
await searchInput.fill('APM Metrics');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
test('2.6 Search state is reflected in URL and pre-fills on direct navigation', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
await searchInput.fill('PromQL');
|
||||
await expect(page).toHaveURL(/search=PromQL/);
|
||||
|
||||
// Opening the URL directly (bookmark / share) should restore search state
|
||||
await page.goto('/dashboard?search=PromQL');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await expect(searchInput).toHaveValue('PromQL');
|
||||
await expect(page.getByText('PromQL and Clickhouse SQL').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('2.7 Clearing search restores the full list', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
await searchInput.fill('APM');
|
||||
await expect(page).toHaveURL(/search=APM/);
|
||||
|
||||
// Clearing the field removes the param and brings back all dashboards
|
||||
await searchInput.fill('');
|
||||
await expect(page).not.toHaveURL(/search=/);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('2.8 Search with no matching results shows empty state', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// A nonsense term guarantees no matches across title, description, or tags
|
||||
await searchInput.fill('xyznonexistent999');
|
||||
|
||||
// No thumbnails — list is empty, no error or broken layout
|
||||
await expect(page.getByAltText('dashboard-image')).toHaveCount(0);
|
||||
await expect(searchInput).toBeVisible();
|
||||
await expect(searchInput).toHaveValue('xyznonexistent999');
|
||||
});
|
||||
|
||||
test('2.9 Search is case-insensitive', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const searchInput = page.getByRole('textbox', { name: 'Search by name, description, or tags...' });
|
||||
|
||||
// Lowercase version of a mixed-case dashboard name — should still match
|
||||
await searchInput.fill('apm metrics');
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
const pageText = await page.locator('body').textContent();
|
||||
expect(pageText?.toUpperCase()).toContain('APM METRICS');
|
||||
});
|
||||
|
||||
// ─── 3. Sorting ───────────────────────────────────────────────────────────
//
// Known behaviour (verified against live app):
// - Fresh load: no sort params in URL; list is already descending (server default)
// - First click: URL gains ?columnKey=updatedAt&order=descend
// - Subsequent clicks: URL stays on order=descend — ascending is not yet implemented
//
// Tests document the current state. The ascending limitation is explicitly
// noted so it is visible during review and easy to fix when implemented.
|
||||
|
||||
test('3.1 Default load has no sort params and shows most recently updated dashboard first', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// On fresh load the URL should be clean — sort params only appear after
|
||||
// the user interacts with the sort button
|
||||
await expect(page).toHaveURL('/dashboard');
|
||||
await expect(page).not.toHaveURL(/columnKey/);
|
||||
await expect(page).not.toHaveURL(/order/);
|
||||
|
||||
// The list is already sorted descending by default (server-side).
|
||||
// Verify by comparing the first two rows' timestamps — the first row must
|
||||
// be more recent than or equal to the second.
|
||||
const rows = page.getByAltText('dashboard-image');
|
||||
await expect(rows.first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('3.2 First click on sort button adds columnKey=updatedAt&order=descend to URL', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Before any interaction — no sort params
|
||||
await expect(page).not.toHaveURL(/columnKey/);
|
||||
|
||||
await page.getByTestId('sort-by').click();
|
||||
|
||||
// After first click the sort state is written to the URL
|
||||
await expect(page).toHaveURL(/columnKey=updatedAt/);
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
|
||||
// List should still be rendering rows correctly
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('3.3 Subsequent sort clicks keep order=descend (ascending not yet implemented)', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const sortButton = page.getByTestId('sort-by');
|
||||
|
||||
// First click — sets descend
|
||||
await sortButton.click();
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
|
||||
// Second click — known limitation: order remains descend, does not flip to ascend
|
||||
await sortButton.click();
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
await expect(page).not.toHaveURL(/order=ascend/);
|
||||
});
|
||||
|
||||
// ─── 4. Row Actions (Context Menu) ───────────────────────────────────────
//
// The three-dot action icon (data-testid: dashboard-action-icon) is always
// visible on every row — no hover required. Clicking it opens a tooltip
// popover. Items inside are scoped to getByRole('tooltip') to avoid
// accidentally matching other elements on the page.
//
// Role visibility:
// @admin — View, Open in New Tab, Copy Link, Export JSON, Delete dashboard
// @editor — View, Open in New Tab, Copy Link, Export JSON (no Delete)
// @viewer — action icon is hidden entirely
|
||||
|
||||
test('4.1 Admin sees all five options in the action menu', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
const tooltip = page.getByRole('tooltip');
|
||||
await expect(tooltip).toBeVisible();
|
||||
|
||||
// All five items must be present for admin
|
||||
await expect(tooltip.getByRole('button', { name: 'View' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Open in New Tab' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Copy Link' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Export JSON' })).toBeVisible();
|
||||
// Delete is rendered as a generic (not a button) in a separate section
|
||||
await expect(tooltip.getByText('Delete dashboard')).toBeVisible();
|
||||
});
|
||||
|
||||
test('4.2 Editor sees four options — Delete dashboard is not present', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
const tooltip = page.getByRole('tooltip');
|
||||
await expect(tooltip).toBeVisible();
|
||||
|
||||
await expect(tooltip.getByRole('button', { name: 'View' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Open in New Tab' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Copy Link' })).toBeVisible();
|
||||
await expect(tooltip.getByRole('button', { name: 'Export JSON' })).toBeVisible();
|
||||
|
||||
// Viewer and Editor cannot delete — the item must be absent
|
||||
await expect(tooltip.getByText('Delete dashboard')).not.toBeVisible();
|
||||
});
|
||||
|
||||
test('4.3 Viewer has no action icon on dashboard rows', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// The action icon must not be present in the DOM for viewer role
|
||||
await expect(page.getByTestId('dashboard-action-icon')).toHaveCount(0);
|
||||
});
|
||||
|
||||
test('4.4 View action navigates to the dashboard detail page', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByRole('button', { name: 'View' }).click();
|
||||
|
||||
// Should land on the detail page — UUID in the path confirms navigation
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
});
|
||||
|
||||
test('4.5 Open in New Tab opens the dashboard in a new browser tab', async ({ page, context }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
|
||||
// waitForEvent('page') must be set up before the click that triggers it
|
||||
const [newPage] = await Promise.all([
|
||||
context.waitForEvent('page'),
|
||||
page.getByRole('tooltip').getByRole('button', { name: 'Open in New Tab' }).click(),
|
||||
]);
|
||||
|
||||
await newPage.waitForLoadState();
|
||||
await expect(newPage).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
await newPage.close();
|
||||
});
|
||||
|
||||
test('4.6 Copy Link copies the dashboard URL to the clipboard', async ({ page, context }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Grant clipboard permissions so we can read back what was written
|
||||
await context.grantPermissions(['clipboard-read', 'clipboard-write']);
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByRole('button', { name: 'Copy Link' }).click();
|
||||
|
||||
// App shows a success notification after copying
|
||||
await expect(page.getByText(/copied|success/i)).toBeVisible();
|
||||
|
||||
// Clipboard must contain a valid dashboard detail URL.
|
||||
// Cast through unknown to access browser globals inside page.evaluate.
|
||||
const clipboardText = await page.evaluate(async () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
return await (globalThis as any).navigator.clipboard.readText();
|
||||
});
|
||||
expect(clipboardText).toMatch(/\/dashboard\/[0-9a-f-]+/);
|
||||
});
|
||||
|
||||
test('4.7 Export JSON downloads the dashboard as a JSON file', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
|
||||
// waitForEvent('download') must be in place before the triggering click
|
||||
const [download] = await Promise.all([
|
||||
page.waitForEvent('download'),
|
||||
page.getByRole('tooltip').getByRole('button', { name: 'Export JSON' }).click(),
|
||||
]);
|
||||
|
||||
expect(download.suggestedFilename()).toMatch(/\.json$/);
|
||||
});
|
||||
|
||||
test('4.8 Action menu closes when clicking outside the popover', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await expect(page.getByRole('tooltip')).toBeVisible();
|
||||
|
||||
// Click on a neutral area — the page heading — to dismiss the popover
|
||||
await page.getByRole('heading', { name: 'Dashboards', level: 1 }).click();
|
||||
await expect(page.getByRole('tooltip')).not.toBeVisible();
|
||||
|
||||
// No navigation should have occurred
|
||||
await expect(page).toHaveURL(/\/dashboard($|\?)/);
|
||||
});
|
||||
|
||||
// ─── 5. Creating Dashboards ───────────────────────────────────────────────
//
// Three creation paths exist: inline name field, New dashboard dropdown →
// Create dashboard, and New dashboard dropdown → Import JSON.
// Create controls (name input, Submit, New dashboard button) are visible
// to Editor and Admin only — hidden from Viewer entirely.
|
||||
|
||||
test('5.1 Create controls are hidden from Viewer', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// None of the create affordances should be present for a viewer
|
||||
await expect(page.getByRole('textbox', { name: 'Enter dashboard name...' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'New dashboard' })).not.toBeVisible();
|
||||
});
|
||||
|
||||
test('5.2 Submit button is disabled when the name input is empty', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Before typing, Submit must be disabled — clicking it should do nothing
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).toBeDisabled();
|
||||
});
|
||||
|
||||
test('5.3 Inline name field creates a named dashboard and navigates to it', async ({ authedPage: page }) => {
|
||||
const name = `Test Dashboard ${Date.now()}`;
|
||||
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const nameInput = page.getByRole('textbox', { name: 'Enter dashboard name...' });
|
||||
await nameInput.fill(name);
|
||||
|
||||
// Submit becomes enabled once text is present
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).toBeEnabled();
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
|
||||
// Should navigate directly to the new dashboard's detail page
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Clean up — delete the dashboard we just created
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('5.4 New dashboard dropdown shows exactly three options', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
const menu = page.getByRole('menu');
|
||||
await expect(menu).toBeVisible();
|
||||
|
||||
// Exactly three items: Create dashboard, Import JSON, View templates
|
||||
await expect(menu.getByRole('menuitem', { name: 'Create dashboard' })).toBeVisible();
|
||||
await expect(menu.getByRole('menuitem', { name: 'Import JSON' })).toBeVisible();
|
||||
await expect(menu.getByRole('menuitem', { name: 'View templates' })).toBeVisible();
|
||||
});
|
||||
|
||||
test('5.5 Create dashboard navigates to new dashboard with default name', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Create dashboard' }).click();
|
||||
|
||||
// New dashboard detail page loads
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Default name is "Sample Title" and onboarding state is shown
|
||||
await expect(page.getByText('Configure your new dashboard')).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Configure' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: /New Panel/ })).toBeVisible();
|
||||
|
||||
// Clean up
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('Sample Title');
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('5.6 Import JSON dialog opens with code editor and upload button', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Import JSON' }).click();
|
||||
|
||||
const dialog = page.getByRole('dialog');
|
||||
await expect(dialog).toBeVisible();
|
||||
await expect(dialog.getByText('Import Dashboard JSON')).toBeVisible();
|
||||
|
||||
// Monaco editor renders line numbers — line "1" is the presence signal
|
||||
await expect(dialog.getByText('1').first()).toBeVisible();
|
||||
await expect(dialog.getByRole('button', { name: 'Upload JSON file' })).toBeVisible();
|
||||
await expect(dialog.getByRole('button', { name: 'Import and Next' })).toBeVisible();
|
||||
});
|
||||
|
||||
test('5.7 Import JSON dialog closes on Escape without creating a dashboard', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Import JSON' }).click();
|
||||
await expect(page.getByRole('dialog')).toBeVisible();
|
||||
|
||||
await page.keyboard.press('Escape');
|
||||
|
||||
await expect(page.getByRole('dialog')).not.toBeVisible();
|
||||
await expect(page).toHaveURL(/\/dashboard($|\?)/);
|
||||
});
|
||||
|
||||
test('5.8 Import JSON dialog closes on clicking the × button', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('button', { name: 'New dashboard' }).click();
|
||||
await page.getByRole('menuitem', { name: 'Import JSON' }).click();
|
||||
|
||||
const dialog = page.getByRole('dialog');
|
||||
await expect(dialog).toBeVisible();
|
||||
|
||||
// The close button is a button with accessible name containing "close" or "×"
|
||||
await dialog.getByRole('button', { name: /close/i }).click();
|
||||
|
||||
await expect(dialog).not.toBeVisible();
|
||||
await expect(page).toHaveURL(/\/dashboard($|\?)/);
|
||||
});
|
||||
|
||||
// ─── 6. Deleting Dashboards ───────────────────────────────────────────────
//
// Only Admin can delete. Each test creates its own disposable dashboard
// so no pre-existing data is affected.
//
// Known behaviour: clicking Cancel in the confirmation dialog navigates to
// the dashboard detail page rather than staying on the list — tests account
// for this rather than asserting we stay on /dashboard.
|
||||
|
||||
test('6.1 Delete confirmation dialog shows dashboard name with Cancel and Delete buttons', async ({ authedPage: page }) => {
|
||||
// Create a disposable dashboard to delete
|
||||
const name = `Delete Test ${Date.now()}`;
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(name);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Return to the list and open delete dialog for the dashboard we just created
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
|
||||
const dialog = page.getByRole('dialog');
|
||||
await expect(dialog).toBeVisible();
|
||||
|
||||
// Dialog heading contains the dashboard name
|
||||
await expect(dialog.getByRole('heading')).toContainText('Are you sure you want to delete the');
|
||||
await expect(dialog.getByRole('heading')).toContainText(name);
|
||||
|
||||
// Both action buttons are present
|
||||
await expect(dialog.getByRole('button', { name: 'Cancel' })).toBeVisible();
|
||||
await expect(dialog.getByRole('button', { name: 'Delete' })).toBeVisible();
|
||||
|
||||
// Clean up — confirm delete
|
||||
await dialog.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('6.2 Cancelling delete navigates to the dashboard detail page (known behaviour)', async ({ authedPage: page }) => {
|
||||
// Create a disposable dashboard
|
||||
const name = `Cancel Delete Test ${Date.now()}`;
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(name);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await expect(page.getByRole('dialog')).toBeVisible();
|
||||
|
||||
// Cancel — known behaviour: lands on detail page, not back on the list
|
||||
await page.getByRole('button', { name: 'Cancel' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Clean up — delete the dashboard we created
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
});
|
||||
|
||||
test('6.3 Confirming delete removes the dashboard from the list', async ({ authedPage: page }) => {
|
||||
// Create a disposable dashboard
|
||||
const name = `Confirm Delete Test ${Date.now()}`;
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Enter dashboard name...' }).fill(name);
|
||||
await page.getByRole('button', { name: 'Submit' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Return to list, find the dashboard, and delete it
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
|
||||
await page.getByTestId('dashboard-action-icon').first().click();
|
||||
await page.getByRole('tooltip').getByText('Delete dashboard').click();
|
||||
await page.getByRole('button', { name: 'Delete' }).click();
|
||||
|
||||
// After deletion, searching for the name should return no results
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill(name);
|
||||
await expect(page.getByAltText('dashboard-image')).toHaveCount(0);
|
||||
});
|
||||
|
||||
// ─── 7. Row Click Navigation ──────────────────────────────────────────────
//
// Clicking anywhere on a dashboard row (except the action icon) navigates
// to the detail page. Runs as @viewer since all roles can navigate.
|
||||
|
||||
test('7.1 Clicking a dashboard row navigates to the detail page', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Click the thumbnail image — a stable, always-present click target
|
||||
// that is not the action icon
|
||||
await page.getByAltText('dashboard-image').first().click();
|
||||
|
||||
// UUID in the path confirms we landed on a detail page
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
});
|
||||
|
||||
test('7.2 Dashboard detail page shows the breadcrumb after row click', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByAltText('dashboard-image').first().click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Breadcrumb "Dashboard /" confirms correct page structure loaded
|
||||
await expect(page.getByRole('button', { name: /Dashboard \// })).toBeVisible();
|
||||
});
|
||||
|
||||
test('7.3 Sidebar Dashboards link navigates to the list page', async ({ authedPage: page }) => {
|
||||
// Start on a different page so the navigation is meaningful
|
||||
await page.goto('/home');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'hidden' });
|
||||
|
||||
// Click the Dashboards entry in the left sidebar
|
||||
await page.getByRole('link', { name: 'Dashboards' }).click();
|
||||
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
await expect(page).toHaveTitle('SigNoz | All Dashboards');
|
||||
});
|
||||
|
||||
// ─── 8. URL State and Deep Linking ───────────────────────────────────────
//
// Search term persists in the URL (?search=<term>) and is restored on direct
// navigation. Sort params (columnKey + order) appear only after the user
// clicks the sort button — not on fresh load.
|
||||
|
||||
test('8.1 Direct navigation with ?search= pre-fills the input and filters results', async ({ authedPage: page }) => {
|
||||
// Navigate directly with the search param — simulates opening a shared link
|
||||
await page.goto('/dashboard?search=PromQL');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Input must be pre-filled with the param value
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toHaveValue('PromQL');
|
||||
|
||||
// Matching dashboard must be visible
|
||||
await expect(page.getByText('PromQL and Clickhouse SQL').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('8.2 Search term updates the URL in real time', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('APM');
|
||||
|
||||
// URL must reflect the typed term immediately
|
||||
await expect(page).toHaveURL(/search=APM/);
|
||||
});
|
||||
|
||||
test('8.3 Browser Back after navigating to a dashboard restores search state', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard?search=APM');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Navigate into a dashboard row
|
||||
await page.getByAltText('dashboard-image').first().click();
|
||||
await expect(page).toHaveURL(/\/dashboard\/[0-9a-f-]+/);
|
||||
|
||||
// Browser back should restore the list with the search param intact
|
||||
await page.goBack();
|
||||
await expect(page).toHaveURL(/search=APM/);
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toHaveValue('APM');
|
||||
});
|
||||
|
||||
test('8.4 Sort params appear in URL only after interacting with the sort button', async ({ authedPage: page }) => {
|
||||
// Fresh load — no sort params
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await expect(page).not.toHaveURL(/columnKey/);
|
||||
|
||||
// After clicking sort — params appear
|
||||
await page.getByTestId('sort-by').click();
|
||||
await expect(page).toHaveURL(/columnKey=updatedAt/);
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
|
||||
// Navigating directly with sort params should honour them on load
|
||||
await page.goto('/dashboard?columnKey=updatedAt&order=descend');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
await expect(page).toHaveURL(/columnKey=updatedAt/);
|
||||
await expect(page).toHaveURL(/order=descend/);
|
||||
});
|
||||
|
||||
// ─── 9. Page Header Actions ───────────────────────────────────────────────
//
// The Feedback and Share buttons live in the top-right of the page header
// and are visible to all roles. This section was absent from the originally
// generated spec and is written from scratch based on live app observation.
|
||||
|
||||
test('9.1 Feedback button is visible and opens a feedback mechanism', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const feedbackButton = page.getByRole('button', { name: 'Feedback' });
|
||||
await expect(feedbackButton).toBeVisible();
|
||||
|
||||
// Clicking should trigger a feedback mechanism (modal, widget, or external link)
|
||||
// — we verify it is interactive without asserting the exact implementation
|
||||
await feedbackButton.click();
|
||||
await expect(page).toHaveURL(/\/dashboard/); // no unintended navigation
|
||||
});
|
||||
|
||||
test('9.2 Share button is visible and triggers a share action', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
const shareButton = page.getByRole('button', { name: 'Share' });
|
||||
await expect(shareButton).toBeVisible();
|
||||
|
||||
await shareButton.click();
|
||||
|
||||
// Clicking Share either opens a dialog or copies the URL — either way the
|
||||
// page should remain on /dashboard with no unintended navigation
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
});
|
||||
|
||||
// ─── 10. Edge Cases and Error Handling ───────────────────────────────────
//
// Boundary conditions: tag overflow rendering, tagless rows, pagination
// reset on search, and role-based visibility for Viewer.
|
||||
|
||||
test('10.1 Dashboards with many tags show a +N overflow indicator', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// The APM Metrics dashboard has 4 tags (apm, latency, error rate, throughput).
|
||||
// The list renders a subset inline and overflows the rest as "+ N".
|
||||
// We search for it to bring it to the top and inspect the row.
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('APM Metrics');
|
||||
await page.getByAltText('dashboard-image').first().waitFor({ state: 'visible' });
|
||||
|
||||
// At least one "+ N" overflow indicator must be visible somewhere in the list
|
||||
await expect(page.getByText(/^\+\s*\d+$/).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('10.2 Dashboards with no tags show a clean row with no empty tag containers', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// "PromQL and Clickhouse SQL" has no tags — search to bring it to top
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('PromQL and Clickhouse SQL');
|
||||
await page.getByAltText('dashboard-image').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Row must be visible with thumbnail and text — no broken layout
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
await expect(page.getByText('PromQL and Clickhouse SQL').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('10.3 Searching while on page 2 resets pagination to page 1', async ({ authedPage: page }) => {
|
||||
// Pre-condition: staging workspace has more than 20 dashboards
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Navigate to page 2
|
||||
await page.getByRole('button', { name: '2' }).click();
|
||||
await expect(page).toHaveURL(/page=2/);
|
||||
|
||||
// Typing a search term should reset back to page 1
|
||||
await page.getByRole('textbox', { name: 'Search by name, description, or tags...' }).fill('APM');
|
||||
await expect(page).not.toHaveURL(/page=2/);
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('10.4 Viewer cannot see create controls or row action icons', async ({ authedPage: page }) => {
|
||||
await page.goto('/dashboard');
|
||||
await page.getByText('All Dashboards').first().waitFor({ state: 'visible' });
|
||||
|
||||
// Create controls must be absent for Viewer
|
||||
await expect(page.getByRole('textbox', { name: 'Enter dashboard name...' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'Submit' })).not.toBeVisible();
|
||||
await expect(page.getByRole('button', { name: 'New dashboard' })).not.toBeVisible();
|
||||
|
||||
// Row action icons must be absent for Viewer
|
||||
await expect(page.getByTestId('dashboard-action-icon')).toHaveCount(0);
|
||||
|
||||
// Core read-only features still work
|
||||
await expect(page.getByRole('textbox', { name: 'Search by name, description, or tags...' })).toBeVisible();
|
||||
await expect(page.getByAltText('dashboard-image').first()).toBeVisible();
|
||||
});
|
||||
});
|
||||
163
tests/e2e/legacy/home/home.spec.ts
Normal file
163
tests/e2e/legacy/home/home.spec.ts
Normal file
@@ -0,0 +1,163 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Home Page - Page Load', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Hello there, Welcome to your SigNoz workspace',
|
||||
}),
|
||||
).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-01: home page loads after login', async ({ authedPage: page }) => {
|
||||
await expect(page).toHaveURL(/\/home/);
|
||||
await expect(page).toHaveTitle(/Home/);
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Hello there, Welcome to your SigNoz workspace',
|
||||
}),
|
||||
).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-02: ingestion status banners are visible', async ({ authedPage: page }) => {
|
||||
await expect(page.getByText('Logs ingestion is active')).toBeVisible();
|
||||
await expect(page.getByText('Traces ingestion is active')).toBeVisible();
|
||||
await expect(page.getByText('Metrics ingestion is active')).toBeVisible();
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Explore Quick Actions', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Hello there, Welcome to your SigNoz workspace',
|
||||
}),
|
||||
).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-03: Explore Logs navigates to logs explorer', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Explore Logs' }).click();
|
||||
await expect(page).toHaveURL(/\/logs\/logs-explorer/);
|
||||
});
|
||||
|
||||
test('TC-04: Explore Traces navigates to traces explorer', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Explore Traces' }).click();
|
||||
await expect(page).toHaveURL(/traces-explorer/);
|
||||
});
|
||||
|
||||
test('TC-05: Explore Metrics navigates to metrics explorer', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Explore Metrics' }).click();
|
||||
await expect(page).toHaveURL(/metrics-explorer/);
|
||||
});
|
||||
|
||||
test('TC-06: Open Logs Explorer shortcut navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Open Logs Explorer' }).click();
|
||||
await expect(page).toHaveURL(/\/logs\/logs-explorer/);
|
||||
});
|
||||
|
||||
test('TC-07: Open Traces Explorer shortcut navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Open Traces Explorer' }).click();
|
||||
await expect(page).toHaveURL(/traces-explorer/);
|
||||
});
|
||||
|
||||
test('TC-08: Open Metrics Explorer shortcut navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Open Metrics Explorer' }).click();
|
||||
await expect(page).toHaveURL(/metrics-explorer/);
|
||||
});
|
||||
|
||||
test('TC-09: Create dashboard button navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Create dashboard' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
});
|
||||
|
||||
test('TC-10: Create an alert button navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('button', { name: 'Create an alert' }).click();
|
||||
await expect(page).toHaveURL(/\/alerts/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Services Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('columnheader', { name: 'APPLICATION' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-11: services table is visible with correct columns', async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('columnheader', { name: 'APPLICATION' })).toBeVisible();
|
||||
await expect(page.getByRole('columnheader', { name: /P99 LATENCY/i })).toBeVisible();
|
||||
await expect(page.getByRole('columnheader', { name: /ERROR RATE/i })).toBeVisible();
|
||||
await expect(page.getByRole('columnheader', { name: /OPS \/ SEC/i })).toBeVisible();
|
||||
await expect(page.getByRole('rowgroup').last().getByRole('row').first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-12: All Services link navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('link', { name: 'All Services' }).click();
|
||||
await expect(page).toHaveURL(/\/services/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Alerts Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('link', { name: 'All Alert Rules' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-13: alerts section shows firing alerts', async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('link', { name: 'All Alert Rules' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: /alert-rules/ }).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-14: All Alert Rules link navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('link', { name: 'All Alert Rules' }).click();
|
||||
await expect(page).toHaveURL(/\/alerts/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Dashboards Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('link', { name: 'All Dashboards' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-15: dashboards section shows recent dashboards', async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('link', { name: 'All Dashboards' })).toBeVisible();
|
||||
await expect(page.getByRole('button', { name: /alert-rules/ }).first()).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-16: All Dashboards link navigates', async ({ authedPage: page }) => {
|
||||
await page.getByRole('link', { name: 'All Dashboards' }).click();
|
||||
await expect(page).toHaveURL(/\/dashboard/);
|
||||
});
|
||||
});
|
||||
|
||||
test.describe('Home Page - Saved Views Widget', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/home', { waitUntil: 'domcontentloaded' });
|
||||
await expect(page.getByRole('link', { name: 'All Views' })).toBeVisible({ timeout: 30000 });
|
||||
});
|
||||
|
||||
test('TC-17: saved views tabs switch between signal types', async ({ authedPage: page }) => {
|
||||
const logsTab = page.locator('button[value="logs"]');
|
||||
const tracesTab = page.locator('button[value="traces"]');
|
||||
const metricsTab = page.locator('button[value="metrics"]');
|
||||
|
||||
await expect(logsTab).toBeVisible();
|
||||
|
||||
await tracesTab.click();
|
||||
await expect(tracesTab).toBeVisible();
|
||||
|
||||
await metricsTab.click();
|
||||
await expect(metricsTab).toBeVisible();
|
||||
|
||||
await logsTab.click();
|
||||
await expect(logsTab).toBeVisible();
|
||||
});
|
||||
|
||||
test('TC-18: All Views link navigates to saved views', async ({ authedPage: page }) => {
|
||||
await page.locator('button[value="logs"]').click();
|
||||
await page.getByRole('link', { name: 'All Views' }).click();
|
||||
await expect(page).toHaveURL(/\/logs\/saved-views/);
|
||||
});
|
||||
});
|
||||
522
tests/e2e/legacy/roles/roles-listing.spec.ts
Normal file
522
tests/e2e/legacy/roles/roles-listing.spec.ts
Normal file
@@ -0,0 +1,522 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe('Roles Listing - Navigation and Access Control', () => {
|
||||
test(
|
||||
'Admin User Can Access Roles Page',
|
||||
async ({ authedPage: page }) => {
|
||||
|
||||
await page.goto('/settings/roles', {
|
||||
waitUntil: 'domcontentloaded',
|
||||
});
|
||||
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Roles',
|
||||
exact: true,
|
||||
}),
|
||||
).toBeVisible({ timeout: 30000 });
|
||||
|
||||
await expect(page).toHaveURL(/.*\/settings\/roles/);
|
||||
|
||||
await expect(
|
||||
page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
}),
|
||||
).toBeVisible({ timeout: 15000 });
|
||||
|
||||
const accessDenied = page.getByText('Access Denied');
|
||||
const permissionDenied = page.getByText('Permission denied');
|
||||
|
||||
const hasAccessDenied = await accessDenied.isVisible().catch(() => false);
|
||||
const hasPermissionDenied = await permissionDenied
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
expect(hasAccessDenied).toBe(false);
|
||||
expect(hasPermissionDenied).toBe(false);
|
||||
|
||||
await expect(page.getByRole('searchbox')).toBeVisible();
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Page Layout and UI Components', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
await Promise.race([
|
||||
page
|
||||
.getByRole('searchbox', { name: 'Search for roles...' })
|
||||
.waitFor({ state: 'visible', timeout: 10000 }),
|
||||
page
|
||||
.getByText(/error|failed/i)
|
||||
.waitFor({ state: 'visible', timeout: 10000 }),
|
||||
]).catch(() => {});
|
||||
});
|
||||
|
||||
test(
|
||||
'Verify Roles Listing Page Layout',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Roles',
|
||||
exact: true,
|
||||
}),
|
||||
).toBeVisible();
|
||||
|
||||
const searchInput = page.getByRole('searchbox');
|
||||
await expect(searchInput).toBeVisible();
|
||||
|
||||
await expect(
|
||||
page.getByText('Name', { exact: true }).first(),
|
||||
).toBeVisible();
|
||||
await expect(
|
||||
page.getByText('Description', { exact: true }).first(),
|
||||
).toBeVisible();
|
||||
await expect(page.getByText('Updated At', { exact: true })).toBeVisible();
|
||||
await expect(page.getByText('Created At', { exact: true })).toBeVisible();
|
||||
|
||||
await expect(page.locator('body')).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Verify Table Structure',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('searchbox')).toBeVisible();
|
||||
|
||||
const roleNames = [
|
||||
'signoz-admin',
|
||||
'signoz-editor',
|
||||
'signoz-viewer',
|
||||
'signoz-anonymous',
|
||||
];
|
||||
const firstRole = page.getByText(roleNames[0]);
|
||||
await expect(firstRole).toBeVisible();
|
||||
|
||||
await expect(
|
||||
page.getByRole('heading', { name: 'Managed roles' }),
|
||||
).toBeVisible();
|
||||
|
||||
await expect(page.getByText(/full administrative access/i)).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
test.describe('Roles Listing - Roles Display and Data Verification', () => {
|
||||
test.beforeEach(async ({ authedPage: page }) => {
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
// Wait for page to load
|
||||
await expect(
|
||||
page.getByRole('searchbox', { name: 'Search for roles...' }),
|
||||
).toBeVisible();
|
||||
});
|
||||
|
||||
test(
|
||||
'Verify API Response Matches UI Display',
|
||||
async ({ authedPage: page }) => {
|
||||
let apiResponse: any = null;
|
||||
|
||||
page.on('response', async (response) => {
|
||||
if (
|
||||
response.url().includes('/api/v1/roles') &&
|
||||
response.status() === 200
|
||||
) {
|
||||
apiResponse = await response.json();
|
||||
}
|
||||
});
|
||||
|
||||
await page.reload();
|
||||
|
||||
await page
|
||||
.getByRole('searchbox', { name: 'Search for roles...' })
|
||||
.waitFor({ state: 'visible', timeout: 10000 });
|
||||
|
||||
await page.waitForTimeout(1000);
|
||||
|
||||
expect(apiResponse).not.toBeNull();
|
||||
expect(apiResponse.status).toBe('success');
|
||||
|
||||
const rolesFromApi = apiResponse.data;
|
||||
expect(rolesFromApi).toBeDefined();
|
||||
expect(rolesFromApi.length).toBe(5);
|
||||
|
||||
for (const role of rolesFromApi) {
|
||||
if (role.name) {
|
||||
await expect(page.getByText(role.name)).toBeVisible();
|
||||
}
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Verify Role Categorization (Managed vs Custom)',
|
||||
async ({ authedPage: page }) => {
|
||||
await expect(page.getByRole('searchbox')).toBeVisible();
|
||||
|
||||
const managedRolesHeader = page.getByRole('heading', {
|
||||
name: 'Managed roles',
|
||||
});
|
||||
const customRolesHeader = page.getByRole('heading', {
|
||||
name: /Custom roles\s*\d+/,
|
||||
});
|
||||
|
||||
await expect(managedRolesHeader).toBeVisible();
|
||||
await expect(customRolesHeader).toBeVisible();
|
||||
|
||||
const headerText = await customRolesHeader.textContent();
|
||||
expect(headerText).toMatch(/Custom roles\s*\d+/);
|
||||
|
||||
await expect(page.getByText('signoz-admin')).toBeVisible();
|
||||
await expect(page.getByText('signoz-editor')).toBeVisible();
|
||||
await expect(page.getByText('signoz-viewer')).toBeVisible();
|
||||
await expect(page.getByText('custom-role-ui')).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
// Search over the roles table. The flow is timing-sensitive: each fill() is
// followed by a short waitForTimeout to let the client-side filtering settle
// (presumably a debounce — TODO confirm against the search component), so the
// statement order below is deliberate.
test.describe('Roles Listing - Search Functionality', () => {
	test.beforeEach(async ({ authedPage: page }) => {
		await page.goto('/settings/roles');

		// Wait for roles to load; a timeout is swallowed so individual tests
		// can still assert their own preconditions.
		await page
			.getByRole('searchbox', { name: 'Search for roles...' })
			.waitFor({ state: 'visible', timeout: 10000 })
			.catch(() => {});
	});

	test(
		'Search Roles by Name',
		async ({ authedPage: page }) => {
			// Baseline: all three managed roles visible before filtering.
			await expect(page.getByText('signoz-admin')).toBeVisible();
			await expect(page.getByText('signoz-editor')).toBeVisible();
			await expect(page.getByText('signoz-viewer')).toBeVisible();

			const searchInput = page.getByRole('searchbox', {
				name: 'Search for roles...',
			});
			await searchInput.fill('editor');

			await page.waitForTimeout(300);

			// The matching role remains in the filtered list.
			await expect(page.getByText('signoz-editor')).toBeVisible();

			await searchInput.clear();
			await searchInput.fill(''); // Ensure it's empty
			await page.waitForTimeout(300);

			// Clearing the search restores the full list.
			await expect(page.getByText('signoz-admin')).toBeVisible();
			await expect(page.getByText('signoz-editor')).toBeVisible();
			await expect(page.getByText('signoz-viewer')).toBeVisible();
		},
	);

	test(
		'Search Roles by Description',
		async ({ authedPage: page }) => {
			const searchInput = page.getByRole('searchbox', {
				name: 'Search for roles...',
			});
			// 'administrative' appears only in the admin role's description,
			// not in any role name — this exercises description matching.
			await searchInput.fill('administrative');

			await page.waitForTimeout(500);

			await expect(page.getByText('signoz-admin')).toBeVisible();
			await expect(page.getByText(/full administrative access/i)).toBeVisible();

			// A non-matching role must be filtered out.
			await expect(page.getByText('signoz-viewer')).toBeHidden();
		},
	);

	test(
		'Search with No Results',
		async ({ authedPage: page }) => {
			await expect(page.getByText('signoz-admin')).toBeVisible({
				timeout: 10000,
			});

			const searchInput = page.getByRole('searchbox', {
				name: 'Search for roles...',
			});
			await searchInput.fill('NonExistentRole123XYZ');

			await page.waitForTimeout(300);

			// isVisible() + catch keeps these probes from throwing if the rows
			// were removed from the DOM entirely.
			const adminStillVisible = await page
				.getByText('signoz-admin')
				.isVisible()
				.catch(() => false);
			const editorStillVisible = await page
				.getByText('signoz-editor')
				.isVisible()
				.catch(() => false);
			const viewerStillVisible = await page
				.getByText('signoz-viewer')
				.isVisible()
				.catch(() => false);

			// At least verify that not all roles are still visible (search had some effect)
			const allStillVisible =
				adminStillVisible && editorStillVisible && viewerStillVisible;
			expect(allStillVisible).toBe(false);

			// 5. Clear search and verify roles reappear
			await searchInput.clear();
			await searchInput.fill('');
			await page.waitForTimeout(300);

			await expect(page.getByText('signoz-admin')).toBeVisible();
		},
	);

	test(
		'Search Case Sensitivity',
		async ({ authedPage: page }) => {
			const searchInput = page.getByRole('searchbox', {
				name: 'Search for roles...',
			});

			// Upper-, lower-, and mixed-case queries must all match
			// 'signoz-admin' — i.e. search is case-insensitive.
			await searchInput.fill('ADMIN');
			await page.waitForTimeout(300);
			await expect(page.getByText('signoz-admin')).toBeVisible();

			await searchInput.clear();
			await searchInput.fill('admin');
			await page.waitForTimeout(300);
			await expect(page.getByText('signoz-admin')).toBeVisible();

			await searchInput.clear();
			await searchInput.fill('AdMin');
			await page.waitForTimeout(300);
			await expect(page.getByText('signoz-admin')).toBeVisible();

			// Leave the search box empty for any later state.
			await searchInput.clear();
		},
	);
});
|
||||
|
||||
// Pagination over the roles table. Each test bails out early (plain return)
// when no pagination control is rendered — i.e. when the tenant has too few
// roles to paginate — so these are conditional, best-effort checks.
test.describe('Roles Listing - Pagination Functionality', () => {
	test.beforeEach(async ({ authedPage: page }) => {
		await page.goto('/settings/roles');
		await expect(
			page.getByRole('heading', { name: 'Roles', exact: true }),
		).toBeVisible({ timeout: 15000 });
		await expect(
			page.getByRole('searchbox', { name: 'Search for roles...' }),
		).toBeVisible({ timeout: 15000 });
	});

	test(
		'Navigate Between Pages',
		async ({ authedPage: page }) => {
			// Pagination is detected as a list element containing digits.
			const paginationList = page.getByRole('list').filter({ hasText: /\d/ });
			const hasPagination = await paginationList.isVisible().catch(() => false);

			// Skip silently when the data set fits on one page.
			if (!hasPagination) {
				return;
			}

			// 1. Verify pagination controls are visible
			await expect(paginationList).toBeVisible();

			// 2. Note the first role displayed on page 1
			const page1HasAdmin = await page.getByText('signoz-admin').isVisible();

			// 3. Click "Next" or page "2" in pagination
			const nextButton = page.getByRole('listitem').getByText('2');
			if (await nextButton.isVisible()) {
				await nextButton.click();
			} else {
				// Try clicking next arrow
				await page.getByRole('listitem').last().click();
			}

			// 4. Wait for page to load
			await page.waitForTimeout(1000);

			// 5. Observe roles on page 2
			const page2HasAdmin = await page.getByText('signoz-admin').isVisible();

			// Verify different roles are shown (or same role is hidden if paging worked)
			expect(page2HasAdmin).not.toBe(page1HasAdmin);

			// Verify URL updates with page parameter
			await expect(page).toHaveURL(/page=2/);

			// 6. Click "Previous" or page "1"
			const prevButton = page.getByRole('listitem').getByText('1');
			if (await prevButton.isVisible()) {
				await prevButton.click();
			} else {
				// Try clicking previous arrow
				await page.getByRole('listitem').first().click();
			}

			// 7. Wait and verify return to page 1: either an explicit page=1
			// param, or a /roles URL with no page param at all.
			await page.waitForTimeout(1000);
			await expect(page).toHaveURL(/page=1|\/roles(?!.*page)/);
		},
	);

	test(
		'Pagination with Search Results',
		async ({ authedPage: page }) => {
			const paginationList = page.getByRole('list').filter({ hasText: /\d/ });
			const hasPagination = await paginationList.isVisible().catch(() => false);

			if (!hasPagination) {
				return;
			}

			// Filter down to the managed roles, then check pagination still
			// works (only if more than one page of matches remains).
			const searchInput = page.getByRole('searchbox');
			await searchInput.fill('signoz');

			await page.waitForTimeout(500);

			const paginationAfterSearch = await paginationList
				.isVisible()
				.catch(() => false);

			if (paginationAfterSearch) {
				const page2Button = page.getByRole('listitem').getByText('2');
				if (await page2Button.isVisible()) {
					await page2Button.click();
					await page.waitForTimeout(500);

					const url = page.url();
					expect(url).toContain('page=2');
				}
			}

			// Clearing the search restores the unfiltered, paginated list.
			await searchInput.clear();
			await page.waitForTimeout(500);

			await expect(paginationList).toBeVisible();
		},
	);

	test(
		'Pagination State Persistence',
		async ({ authedPage: page }) => {
			const paginationList = page.getByRole('list').filter({ hasText: /\d/ });
			const hasPagination = await paginationList.isVisible().catch(() => false);

			if (!hasPagination) {
				return;
			}

			const page2Button = page.getByRole('listitem').getByText('2');
			if (await page2Button.isVisible()) {
				await page2Button.click();
				await page.waitForTimeout(500);

				await expect(page).toHaveURL(/page=2/);

				// The page param must survive a full reload, and the page must
				// still render its loaded state afterwards.
				await page.reload();

				await expect(page).toHaveURL(/page=2/);

				await expect(
					page.getByRole('searchbox', {
						name: 'Search for roles...',
					}),
				).toBeVisible();
			}
		},
	);
});
|
||||
|
||||
test.describe('Roles Listing - Loading and Error States', () => {
|
||||
test(
|
||||
'Verify Loading State',
|
||||
async ({ authedPage: page }) => {
|
||||
await page.route('**/api/v1/roles', async (route) => {
|
||||
await new Promise((resolve) => setTimeout(resolve, 1000));
|
||||
route.continue();
|
||||
});
|
||||
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
const loadingIndicators = [
|
||||
page.locator('[class*="skeleton"]'),
|
||||
page.locator('[class*="loading"]'),
|
||||
page.locator('[class*="spinner"]'),
|
||||
page.getByRole('progressbar'),
|
||||
];
|
||||
|
||||
for (const indicator of loadingIndicators) {
|
||||
if (await indicator.isVisible().catch(() => false)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
await expect(
|
||||
page.getByRole('searchbox', {
|
||||
name: 'Search for roles...',
|
||||
}),
|
||||
).toBeVisible({ timeout: 10000 });
|
||||
|
||||
await expect(page.getByRole('heading', { name: 'Roles' })).toBeVisible();
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Handle API Error State',
|
||||
async ({ authedPage: page }) => {
|
||||
await page.route('**/api/v1/roles', async (route) => {
|
||||
route.fulfill({
|
||||
status: 500,
|
||||
contentType: 'application/json',
|
||||
body: JSON.stringify({
|
||||
status: 'error',
|
||||
error: 'Internal Server Error',
|
||||
}),
|
||||
});
|
||||
});
|
||||
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
await page.waitForTimeout(2000);
|
||||
|
||||
const hasRoles = await page
|
||||
.getByText('signoz-admin')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
if (!hasRoles) {
|
||||
await expect(
|
||||
page.getByRole('heading', {
|
||||
name: 'Roles',
|
||||
exact: true,
|
||||
}),
|
||||
).toBeVisible();
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
test(
|
||||
'Handle Network Failure',
|
||||
async ({ authedPage: page }) => {
|
||||
await page.route('**/api/v1/roles', async (route) => {
|
||||
route.abort('failed');
|
||||
});
|
||||
|
||||
await page.goto('/settings/roles');
|
||||
|
||||
await page.waitForTimeout(2000);
|
||||
|
||||
const hasRoles = await page
|
||||
.getByText('signoz-admin')
|
||||
.isVisible()
|
||||
.catch(() => false);
|
||||
|
||||
expect(hasRoles).toBe(false);
|
||||
|
||||
await expect(page.locator('body')).toBeVisible();
|
||||
},
|
||||
);
|
||||
});
|
||||
45
tests/e2e/package.json
Normal file
45
tests/e2e/package.json
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"name": "signoz-frontend-automation",
|
||||
"version": "1.0.0",
|
||||
"description": "E2E tests for SigNoz frontend with Playwright",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"test": "playwright test",
|
||||
"test:staging": "SIGNOZ_E2E_BASE_URL=https://app.us.staging.signoz.cloud playwright test",
|
||||
"test:ui": "playwright test --ui",
|
||||
"test:headed": "playwright test --headed",
|
||||
"test:debug": "playwright test --debug",
|
||||
"test:chromium": "playwright test --project=chromium",
|
||||
"test:firefox": "playwright test --project=firefox",
|
||||
"test:webkit": "playwright test --project=webkit",
|
||||
"report": "playwright show-report artifacts/html",
|
||||
"codegen": "playwright codegen",
|
||||
"install:browsers": "playwright install",
|
||||
"install:cli": "npm install -g @playwright/cli@latest && playwright-cli install --skills",
|
||||
"lint": "eslint . --ext .ts,.js",
|
||||
"lint:fix": "eslint . --ext .ts,.js --fix",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"keywords": [
|
||||
"playwright",
|
||||
"e2e",
|
||||
"testing",
|
||||
"signoz"
|
||||
],
|
||||
"author": "",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.57.0-alpha-2025-10-09",
|
||||
"@types/node": "^20.0.0",
|
||||
"@typescript-eslint/eslint-plugin": "^6.0.0",
|
||||
"@typescript-eslint/parser": "^6.0.0",
|
||||
"dotenv": "^16.0.0",
|
||||
"eslint": "^9.26.0",
|
||||
"eslint-plugin-playwright": "^0.16.0",
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18.0.0",
|
||||
"yarn": ">=1.22.0"
|
||||
}
|
||||
}
|
||||
11
tests/e2e/playwright-cli.json
Normal file
11
tests/e2e/playwright-cli.json
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"browser": {
|
||||
"browserName": "chromium",
|
||||
"launchOptions": { "headless": true }
|
||||
},
|
||||
"timeouts": {
|
||||
"action": 5000,
|
||||
"navigation": 30000
|
||||
},
|
||||
"outputDir": ".playwright-cli"
|
||||
}
|
||||
61
tests/e2e/playwright.config.ts
Normal file
61
tests/e2e/playwright.config.ts
Normal file
@@ -0,0 +1,61 @@
|
||||
import { defineConfig, devices } from '@playwright/test';
|
||||
import dotenv from 'dotenv';
|
||||
import path from 'path';
|
||||
|
||||
// .env holds user-provided defaults (staging creds).
|
||||
// .env.local is written by tests/e2e/bootstrap/setup.py when the pytest
|
||||
// lifecycle brings the backend up locally; override=true so local-backend
|
||||
// coordinates win over any stale .env values. Subprocess-injected env
|
||||
// (e.g. when pytest shells out to `yarn test`) still takes priority —
|
||||
// dotenv doesn't touch vars that are already set in process.env.
|
||||
dotenv.config({ path: path.resolve(__dirname, '.env') });
|
||||
dotenv.config({ path: path.resolve(__dirname, '.env.local'), override: true });
|
||||
|
||||
export default defineConfig({
|
||||
testDir: './tests',
|
||||
|
||||
// All Playwright output lands under artifacts/. One subdir per reporter
|
||||
// plus test-results/ for per-test artifacts (traces/screenshots/videos).
|
||||
// CI can archive the whole dir with `tar czf artifacts.tgz tests/e2e/artifacts`.
|
||||
outputDir: 'artifacts/test-results',
|
||||
|
||||
// Run tests in parallel
|
||||
fullyParallel: true,
|
||||
|
||||
// Fail the build on CI if you accidentally left test.only
|
||||
forbidOnly: !!process.env.CI,
|
||||
|
||||
// Retry on CI only
|
||||
retries: process.env.CI ? 2 : 0,
|
||||
|
||||
// Workers
|
||||
workers: process.env.CI ? 2 : undefined,
|
||||
|
||||
// Reporter
|
||||
reporter: [
|
||||
['html', { outputFolder: 'artifacts/html', open: 'never' }],
|
||||
['json', { outputFile: 'artifacts/json/results.json' }],
|
||||
['list'],
|
||||
],
|
||||
|
||||
// Shared settings
|
||||
use: {
|
||||
baseURL:
|
||||
process.env.SIGNOZ_E2E_BASE_URL || 'https://app.us.staging.signoz.cloud',
|
||||
trace: 'on-first-retry',
|
||||
screenshot: 'only-on-failure',
|
||||
video: 'retain-on-failure',
|
||||
colorScheme: 'dark',
|
||||
locale: 'en-US',
|
||||
viewport: { width: 1280, height: 720 },
|
||||
},
|
||||
|
||||
// Browser projects. No project-level auth — specs opt in via the
|
||||
// authedPage fixture in tests/e2e/fixtures/auth.ts, which logs a user
|
||||
// in on first use and caches the resulting storageState per worker.
|
||||
projects: [
|
||||
{ name: 'chromium', use: devices['Desktop Chrome'] },
|
||||
{ name: 'firefox', use: devices['Desktop Firefox'] },
|
||||
{ name: 'webkit', use: devices['Desktop Safari'] },
|
||||
],
|
||||
});
|
||||
337
tests/e2e/tests/alerts/alerts.spec.ts
Normal file
337
tests/e2e/tests/alerts/alerts.spec.ts
Normal file
@@ -0,0 +1,337 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
// Alert-rule regression. Flows within the file are serial: they share the
// /alerts list page and the tenant's rule table, so parallel workers would
// race on the same tenant-wide state.
test.describe.configure({ mode: 'serial' });
|
||||
|
||||
// Minimal v2 threshold DTO used when seeding via API is simpler than
// clicking through the query builder (tests whose focus is elsewhere).
// Tests that exercise the form UI (TC-02 labels, TC-03 test-notification)
// drive the UI directly instead of using this.
//
// `name` is used for the rule title, its annotations, and nothing else;
// `overrides` is spread last, so any top-level field (alert, condition,
// labels, ...) can be replaced wholesale by the caller.
function thresholdRuleBody(name: string, overrides: Record<string, unknown> = {}): unknown {
	return {
		alert: name,
		alertType: 'METRIC_BASED_ALERT',
		ruleType: 'threshold_rule',
		condition: {
			// Single "critical" threshold at target 0 — fires trivially, which
			// is fine for list/CRUD tests that never evaluate the rule.
			thresholds: {
				kind: 'basic',
				spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }],
			},
			compositeQuery: {
				queryType: 'builder',
				panelType: 'graph',
				queries: [
					{
						type: 'builder_query',
						spec: {
							name: 'A',
							signal: 'metrics',
							source: '',
							stepInterval: null,
							disabled: false,
							filter: { expression: '' },
							having: { expression: '' },
							// sum-of-rates over a demo metric that exists in
							// the target workspace.
							aggregations: [
								{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' },
							],
						},
					},
				],
			},
			selectedQueryName: 'A',
			alertOnAbsent: false,
			requireMinPoints: false,
		},
		annotations: { description: name, summary: name },
		labels: {},
		notificationSettings: {
			groupBy: [],
			usePolicy: true,
			renotify: { enabled: false, interval: '30m', alertStates: [] },
		},
		evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
		schemaVersion: 'v2alpha1',
		// Tags the seeded rule so leftovers are traceable to this spec file.
		source: 'alerts.spec.ts',
		version: 'v5',
		...overrides,
	};
}
|
||||
|
||||
async function authHeaders(page: import('@playwright/test').Page): Promise<Record<string, string>> {
|
||||
try {
|
||||
const token = await page.evaluate(() => localStorage.getItem('AUTH_TOKEN'));
|
||||
return { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' };
|
||||
} catch {
|
||||
return { 'Content-Type': 'application/json' };
|
||||
}
|
||||
}
|
||||
|
||||
async function createRule(page: import('@playwright/test').Page, body: unknown): Promise<string> {
|
||||
const resp = await page.request.post('/api/v2/rules', { data: body, headers: await authHeaders(page) });
|
||||
if (resp.status() !== 201) throw new Error(`POST /api/v2/rules ${resp.status()}: ${await resp.text()}`);
|
||||
const json = await resp.json();
|
||||
return json.data.id as string;
|
||||
}
|
||||
|
||||
// Best-effort teardown: silently skip if the page was torn down mid-test.
|
||||
async function deleteRuleIfExists(page: import('@playwright/test').Page, id: string | undefined): Promise<void> {
|
||||
if (!id) return;
|
||||
try {
|
||||
await page.request.delete(`/api/v2/rules/${id}`, { headers: await authHeaders(page) });
|
||||
} catch {
|
||||
/* page closed — best effort */
|
||||
}
|
||||
}
|
||||
|
||||
// Purge any rules left over from earlier runs with the same name so each
|
||||
// test starts from a clean slate. The list endpoint returns all rules,
|
||||
// so we filter locally.
|
||||
async function purgeRulesByName(
|
||||
page: import('@playwright/test').Page,
|
||||
name: string,
|
||||
): Promise<void> {
|
||||
const resp = await page.request.get('/api/v1/rules', { headers: await authHeaders(page) });
|
||||
if (!resp.ok()) return;
|
||||
const body = await resp.json();
|
||||
const items: Array<{ id: string; alert?: string }> = body?.data?.rules ?? [];
|
||||
const orphans = items.filter((r) => r.alert === name);
|
||||
for (const orphan of orphans) {
|
||||
await page.request
|
||||
.delete(`/api/v2/rules/${orphan.id}`, { headers: await authHeaders(page) })
|
||||
.catch(() => undefined);
|
||||
}
|
||||
}
|
||||
|
||||
// Row action menu: the trigger is the lucide ellipsis button inside the
|
||||
// [data-testid="alert-actions"] wrapper. Menu items render in an Ant portal
|
||||
// outside the row with role=menuitem — find them at page scope.
|
||||
async function openRowActions(
|
||||
row: import('@playwright/test').Locator,
|
||||
): Promise<void> {
|
||||
await row.locator('[data-testid="alert-actions"] button').first().click();
|
||||
}
|
||||
|
||||
test('TC-01 alerts list — toggle disable/enable/delete via UI', async ({ authedPage: page }) => {
|
||||
await page.goto('/alerts?tab=AlertRules');
|
||||
const name = 'alerts-list-rule';
|
||||
await purgeRulesByName(page, name);
|
||||
const id = await createRule(page, thresholdRuleBody(name));
|
||||
|
||||
try {
|
||||
await page.reload();
|
||||
const row = page.locator('tr', { hasText: name });
|
||||
await expect(row).toBeVisible();
|
||||
|
||||
await openRowActions(row);
|
||||
const patchDisable = page.waitForResponse(
|
||||
(r) => r.url().includes(`/rules/${id}`) && r.request().method() === 'PATCH',
|
||||
);
|
||||
await page.getByRole('menuitem').filter({ hasText: /^disable$/i }).click();
|
||||
await patchDisable;
|
||||
await expect(row).toContainText(/disabled/i);
|
||||
|
||||
await openRowActions(row);
|
||||
const patchEnable = page.waitForResponse(
|
||||
(r) => r.url().includes(`/rules/${id}`) && r.request().method() === 'PATCH',
|
||||
);
|
||||
await page.getByRole('menuitem').filter({ hasText: /^enable$/i }).click();
|
||||
await patchEnable;
|
||||
await expect(row).not.toContainText(/disabled/i);
|
||||
|
||||
await openRowActions(row);
|
||||
const delWait = page.waitForResponse(
|
||||
(r) => r.url().includes(`/rules/${id}`) && r.request().method() === 'DELETE',
|
||||
);
|
||||
await page.getByRole('menuitem').filter({ hasText: /^delete$/i }).click();
|
||||
await delWait;
|
||||
await expect(page.locator('tr', { hasText: name })).toHaveCount(0);
|
||||
} finally {
|
||||
await deleteRuleIfExists(page, id);
|
||||
}
|
||||
});
|
||||
|
||||
test('TC-02 rule labels — create via UI, round-trip, edit', async ({ authedPage: page }) => {
|
||||
// Drive the V2 form UI to exercise the label input component —
|
||||
// this test's focus is label add/remove, so no API shortcut here.
|
||||
const name = 'labels-rule';
|
||||
let id: string | undefined;
|
||||
|
||||
await page.goto('/alerts?tab=AlertRules');
|
||||
await purgeRulesByName(page, name);
|
||||
|
||||
try {
|
||||
await page.goto('/alerts/new');
|
||||
await page.getByTestId('alert-name-input').fill(name);
|
||||
|
||||
// Add two labels via the label input component. After submitting a
|
||||
// value, the input flips back to key mode — the "+ Add labels" button
|
||||
// is only present on first entry, so click it conditionally.
|
||||
for (const [k, v] of [['env', 'prod'], ['severity', 'warn']] as const) {
|
||||
const addBtn = page.getByTestId('alert-add-label-button');
|
||||
if (await addBtn.isVisible().catch(() => false)) {
|
||||
await addBtn.click();
|
||||
}
|
||||
await page.getByTestId('alert-add-label-input').fill(k);
|
||||
await page.keyboard.press('Enter');
|
||||
await page.getByTestId('alert-add-label-input').fill(v);
|
||||
await page.keyboard.press('Enter');
|
||||
}
|
||||
// Pills render immediately in the form header from local state.
|
||||
await expect(page.getByTestId('label-pill-env-prod')).toBeVisible();
|
||||
await expect(page.getByTestId('label-pill-severity-warn')).toBeVisible();
|
||||
|
||||
// Saving from the V2 form requires a valid query-builder setup, which
|
||||
// is incidental to this test's label-round-trip focus. Persist with
|
||||
// the same labels via API and navigate to the overview to assert the
|
||||
// saved-side UI rendering — the actual round-trip this test covers.
|
||||
id = await createRule(page, thresholdRuleBody(name, { labels: { env: 'prod', severity: 'warn' } }));
|
||||
|
||||
await page.goto(`/alerts/overview?ruleId=${id}`);
|
||||
await expect(page.getByTestId('label-pill-env-prod')).toBeVisible();
|
||||
await expect(page.getByTestId('label-pill-severity-warn')).toBeVisible();
|
||||
|
||||
// Remove severity via edit and re-check.
|
||||
await page.request.put(`/api/v2/rules/${id}`, {
|
||||
data: thresholdRuleBody(name, { labels: { env: 'prod' } }),
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
await page.goto(`/alerts/overview?ruleId=${id}`);
|
||||
await expect(page.getByTestId('label-pill-env-prod')).toBeVisible();
|
||||
await expect(page.getByTestId('label-pill-severity-warn')).toHaveCount(0);
|
||||
} finally {
|
||||
await deleteRuleIfExists(page, id);
|
||||
}
|
||||
});
|
||||
|
||||
test('TC-03 test-notification — Test Notification button disabled on a fresh V2 form', async ({
|
||||
authedPage: page,
|
||||
}) => {
|
||||
// Fresh /alerts/new in V2 mode. The Test Notification button is gated
|
||||
// on query-builder completion — we assert the disabled pre-state here.
|
||||
// The legacy flow also probed /api/v2/rules/test for a 200 response
|
||||
// with `alertCount`; that probe is covered by the integration suite
|
||||
// where metric data is seeded. Driving it here would require
|
||||
// bespoke metric seeding that isn't on the critical path for UI regression.
|
||||
await page.goto('/alerts/new');
|
||||
const testBtn = page.getByRole('button', { name: /test notification/i });
|
||||
await expect(testBtn).toBeVisible();
|
||||
await expect(testBtn).toBeDisabled();
|
||||
|
||||
// Save Alert Rule button shares the same gating.
|
||||
const saveBtn = page.getByRole('button', { name: /save alert rule/i });
|
||||
await expect(saveBtn).toBeDisabled();
|
||||
});
|
||||
|
||||
test('TC-04 alert details — overview, history, AlertNotFound', async ({ authedPage: page }) => {
|
||||
await page.goto('/alerts?tab=AlertRules');
|
||||
const name = 'details-rule';
|
||||
await purgeRulesByName(page, name);
|
||||
const id = await createRule(page, thresholdRuleBody(name, { labels: { severity: 'warning' } }));
|
||||
|
||||
try {
|
||||
await page.goto(`/alerts/overview?ruleId=${id}`);
|
||||
await expect(page.getByTestId('alert-name-input')).toBeVisible();
|
||||
|
||||
await page.getByRole('tab', { name: /history/i }).click();
|
||||
// History tab renders either "Total Triggered" or a table — assert on
|
||||
// URL state and tab-switch completion rather than fragile inner copy.
|
||||
await expect(page).toHaveURL(/tab=History|history/i);
|
||||
|
||||
// Bogus UUID → 404 page (document.title set to "Alert Not Found").
|
||||
await page.goto('/alerts/overview?ruleId=00000000-0000-0000-0000-000000000000');
|
||||
await expect(page).toHaveTitle(/alert not found/i);
|
||||
|
||||
// Missing ruleId → apology copy.
|
||||
await page.goto('/alerts/overview');
|
||||
await expect(page.getByText(/couldn'?t find/i)).toBeVisible();
|
||||
|
||||
// Deleted ruleId → also "Alert Not Found".
|
||||
await page.request.delete(`/api/v2/rules/${id}`, { headers: await authHeaders(page) });
|
||||
await page.goto(`/alerts/overview?ruleId=${id}`);
|
||||
await expect(page).toHaveTitle(/alert not found/i);
|
||||
} finally {
|
||||
await deleteRuleIfExists(page, id);
|
||||
}
|
||||
});
|
||||
|
||||
test('TC-07 anomaly alerts — type selection, CRUD, toggle asymmetry', async ({ authedPage: page }) => {
|
||||
// Type-selection renders alert-type cards. Anomaly is gated on the
|
||||
// ANOMALY_DETECTION feature flag (EE-only); community builds hide it.
|
||||
await page.goto('/alerts/type-selection');
|
||||
await expect(page.getByTestId('alert-type-card-METRIC_BASED_ALERT')).toBeVisible();
|
||||
|
||||
const anomalyCard = page.getByTestId('alert-type-card-ANOMALY_BASED_ALERT');
|
||||
const anomalyEnabled = await anomalyCard.isVisible().catch(() => false);
|
||||
if (!anomalyEnabled) {
|
||||
// Community: anomaly card hidden, classic anomaly form not available
|
||||
// either. The remaining legacy Flow-6 assertions are EE-only.
|
||||
test.skip(true, 'ANOMALY_DETECTION feature flag not enabled on this build');
|
||||
return;
|
||||
}
|
||||
|
||||
await expect(anomalyCard.getByText('Beta')).toBeVisible();
|
||||
await anomalyCard.click();
|
||||
await page.waitForURL(/ruleType=anomaly_rule.*alertType=METRIC_BASED_ALERT/);
|
||||
await expect(page.locator('button[value="anomaly_rule"]')).toHaveClass(/selected/);
|
||||
await expect(page.locator('.create-alert-v2-footer')).toHaveCount(0);
|
||||
|
||||
// Seed anomaly rule via API (classic-form query setup is incidental to
|
||||
// the toggle-behavior assertion this test is focused on).
|
||||
const name = 'anomaly-rule';
|
||||
await purgeRulesByName(page, name);
|
||||
const id = await createRule(page, {
|
||||
...(thresholdRuleBody(name, { labels: { severity: 'warning' } }) as Record<string, unknown>),
|
||||
ruleType: 'anomaly_rule',
|
||||
condition: {
|
||||
thresholds: {
|
||||
kind: 'basic',
|
||||
spec: [{ name: 'critical', target: 3, matchType: '1', op: '1', channels: [], targetUnit: '' }],
|
||||
},
|
||||
compositeQuery: {
|
||||
queryType: 'builder',
|
||||
panelType: 'graph',
|
||||
queries: [
|
||||
{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A',
|
||||
signal: 'metrics',
|
||||
source: '',
|
||||
stepInterval: null,
|
||||
disabled: false,
|
||||
filter: { expression: '' },
|
||||
having: { expression: '' },
|
||||
aggregations: [
|
||||
{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' },
|
||||
],
|
||||
functions: [{ name: 'anomaly', args: [{ name: 'z_score_threshold', value: 3 }] }],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
algorithm: 'standard',
|
||||
seasonality: 'hourly',
|
||||
},
|
||||
});
|
||||
|
||||
try {
|
||||
// Asymmetric toggle: anomaly → threshold transitions classic → V2
|
||||
// (run-6 observation: no return path once in threshold).
|
||||
await page.goto('/alerts/new?ruleType=anomaly_rule&alertType=METRIC_BASED_ALERT');
|
||||
await page.locator('button[value="threshold_rule"]').click();
|
||||
await expect(page).toHaveURL(/ruleType=threshold_rule/);
|
||||
await expect(page.locator('.create-alert-v2-footer')).toBeVisible();
|
||||
await expect(page.locator('button[value="anomaly_rule"]')).toHaveCount(0);
|
||||
|
||||
// Legacy Flow 6.9 probed /api/v2/rules/test with the anomaly DTO
|
||||
// for contract coverage. That probe hits metric-metadata lookup on
|
||||
// the BE which fails without seeded metric data (same path as TC-03).
|
||||
// Covered in the integration suite where metrics are seeded.
|
||||
} finally {
|
||||
await deleteRuleIfExists(page, id);
|
||||
}
|
||||
});
|
||||
214
tests/e2e/tests/alerts/cascade-delete.spec.ts
Normal file
214
tests/e2e/tests/alerts/cascade-delete.spec.ts
Normal file
@@ -0,0 +1,214 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
// Junction behavior: a rule linked to a downtime can't be deleted, and
|
||||
// a downtime that references a rule can't be deleted either. Both surface
|
||||
// as HTTP 409 `already_exists`.
|
||||
test.describe.configure({ mode: 'serial' });
|
||||
|
||||
async function authHeaders(page: import('@playwright/test').Page): Promise<Record<string, string>> {
|
||||
try {
|
||||
const token = await page.evaluate(() => localStorage.getItem('AUTH_TOKEN'));
|
||||
return { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' };
|
||||
} catch {
|
||||
return { 'Content-Type': 'application/json' };
|
||||
}
|
||||
}
|
||||
|
||||
// Purge leftovers from prior runs. Downtimes are deleted first because
|
||||
// they hold alertIds — deleting the rule first would 409 via the very
|
||||
// cascade constraint this test probes.
|
||||
async function purgeDowntimesByName(
|
||||
page: import('@playwright/test').Page,
|
||||
name: string,
|
||||
): Promise<void> {
|
||||
const resp = await page.request.get('/api/v1/downtime_schedules', { headers: await authHeaders(page) });
|
||||
if (!resp.ok()) return;
|
||||
const body = await resp.json();
|
||||
const items: Array<{ id: string; name?: string }> = body?.data ?? [];
|
||||
for (const orphan of items.filter((d) => d.name === name)) {
|
||||
// Unlink first to break any cascade constraint, then delete.
|
||||
await page.request
|
||||
.put(`/api/v1/downtime_schedules/${orphan.id}`, {
|
||||
data: {
|
||||
name: orphan.name,
|
||||
description: 'purge',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date().toISOString(),
|
||||
endTime: new Date(Date.now() + 60_000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
},
|
||||
headers: await authHeaders(page),
|
||||
})
|
||||
.catch(() => undefined);
|
||||
await page.request
|
||||
.delete(`/api/v1/downtime_schedules/${orphan.id}`, { headers: await authHeaders(page) })
|
||||
.catch(() => undefined);
|
||||
}
|
||||
}
|
||||
|
||||
async function purgeRulesByName(
|
||||
page: import('@playwright/test').Page,
|
||||
name: string,
|
||||
): Promise<void> {
|
||||
const resp = await page.request.get('/api/v1/rules', { headers: await authHeaders(page) });
|
||||
if (!resp.ok()) return;
|
||||
const body = await resp.json();
|
||||
const items: Array<{ id: string; alert?: string }> = body?.data?.rules ?? [];
|
||||
for (const orphan of items.filter((r) => r.alert === name)) {
|
||||
await page.request
|
||||
.delete(`/api/v2/rules/${orphan.id}`, { headers: await authHeaders(page) })
|
||||
.catch(() => undefined);
|
||||
}
|
||||
}
|
||||
|
||||
function thresholdRuleBody(name: string): unknown {
|
||||
return {
|
||||
alert: name,
|
||||
alertType: 'METRIC_BASED_ALERT',
|
||||
ruleType: 'threshold_rule',
|
||||
condition: {
|
||||
thresholds: {
|
||||
kind: 'basic',
|
||||
spec: [{ name: 'critical', target: 0, matchType: '1', op: '1', channels: [], targetUnit: '' }],
|
||||
},
|
||||
compositeQuery: {
|
||||
queryType: 'builder',
|
||||
panelType: 'graph',
|
||||
queries: [
|
||||
{
|
||||
type: 'builder_query',
|
||||
spec: {
|
||||
name: 'A',
|
||||
signal: 'metrics',
|
||||
source: '',
|
||||
stepInterval: null,
|
||||
disabled: false,
|
||||
filter: { expression: '' },
|
||||
having: { expression: '' },
|
||||
aggregations: [
|
||||
{ metricName: 'app.currency_counter', timeAggregation: 'rate', spaceAggregation: 'sum' },
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
selectedQueryName: 'A',
|
||||
alertOnAbsent: false,
|
||||
requireMinPoints: false,
|
||||
},
|
||||
annotations: { description: name, summary: name },
|
||||
labels: { severity: 'warning' },
|
||||
notificationSettings: {
|
||||
groupBy: [],
|
||||
usePolicy: true,
|
||||
renotify: { enabled: false, interval: '30m', alertStates: [] },
|
||||
},
|
||||
evaluation: { kind: 'rolling', spec: { evalWindow: '5m0s', frequency: '1m' } },
|
||||
schemaVersion: 'v2alpha1',
|
||||
source: 'cascade-delete.spec.ts',
|
||||
version: 'v5',
|
||||
};
|
||||
}
|
||||
|
||||
test('TC-06 cascade-delete — 409 on linked rule and linked downtime', async ({ authedPage: page }) => {
|
||||
const ruleName = 'cascade-rule';
|
||||
const downtimeName = 'cascade-downtime';
|
||||
let ruleId: string | undefined;
|
||||
let downtimeId: string | undefined;
|
||||
|
||||
try {
|
||||
await page.goto('/alerts?tab=AlertRules');
|
||||
await purgeDowntimesByName(page, downtimeName);
|
||||
await purgeRulesByName(page, ruleName);
|
||||
|
||||
// Seed the pair via API — this test's focus is the BE 409 contract
|
||||
// surfaced through the UI delete flow, not the creation UX.
|
||||
const ruleResp = await page.request.post('/api/v2/rules', {
|
||||
data: thresholdRuleBody(ruleName),
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
expect(ruleResp.status()).toBe(201);
|
||||
ruleId = (await ruleResp.json()).data.id as string;
|
||||
|
||||
const now = Date.now();
|
||||
const dtResp = await page.request.post('/api/v1/downtime_schedules', {
|
||||
data: {
|
||||
name: downtimeName,
|
||||
description: 'cascade-delete.spec.ts',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [ruleId],
|
||||
},
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
expect(dtResp.status()).toBeLessThan(300);
|
||||
const dtJson = await dtResp.json();
|
||||
downtimeId = (dtJson.data?.id ?? dtJson.id) as string;
|
||||
|
||||
// Trigger rule delete via the list action menu. Capture the 409 from
|
||||
// the network layer — the actual UI toast carries the same message
|
||||
// but asserting on toasts is tenant-state-sensitive (stacking).
|
||||
await page.goto('/alerts?tab=AlertRules');
|
||||
const row = page.locator('tr', { hasText: ruleName });
|
||||
await expect(row).toBeVisible();
|
||||
await row.locator('[data-testid="alert-actions"] button').first().click();
|
||||
const ruleDeleteWait = page.waitForResponse(
|
||||
(r) => r.url().includes(`/rules/${ruleId}`) && r.request().method() === 'DELETE',
|
||||
);
|
||||
await page.getByRole('menuitem').filter({ hasText: /^delete$/i }).click();
|
||||
const ruleDelResp = await ruleDeleteWait;
|
||||
// The V1 delete endpoint that the UI action-menu hits surfaces the
|
||||
// underlying FK constraint as a 409. The V2 endpoint wraps this in
|
||||
// a friendlier `already_exists` shape but isn't what the list UI calls.
|
||||
expect(ruleDelResp.status()).toBe(409);
|
||||
const ruleBodyText = await ruleDelResp.text();
|
||||
expect(ruleBodyText).toMatch(/planned_maintenance_rule|foreign key|cannot delete rule|referenced/i);
|
||||
|
||||
// Direct API 409 probe for the downtime side. The planned-downtime
|
||||
// list uses accordion/lucide-SVG controls that aren't testid-tagged;
|
||||
// the UI-trigger assertion is covered by the rule side above.
|
||||
const dtDelResp = await page.request.delete(`/api/v1/downtime_schedules/${downtimeId}`, {
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
expect(dtDelResp.status()).toBe(409);
|
||||
const dtBodyText = await dtDelResp.text();
|
||||
expect(dtBodyText).toMatch(
|
||||
/already_exists|cannot delete planned maintenance|referenced|foreign key/i,
|
||||
);
|
||||
} finally {
|
||||
// Unlink to break the cycle, then drop both.
|
||||
if (downtimeId) {
|
||||
await page.request
|
||||
.put(`/api/v1/downtime_schedules/${downtimeId}`, {
|
||||
data: {
|
||||
name: downtimeName,
|
||||
description: 'cleanup',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(Date.now()).toISOString(),
|
||||
endTime: new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
},
|
||||
headers: await authHeaders(page),
|
||||
})
|
||||
.catch(() => undefined);
|
||||
await page.request
|
||||
.delete(`/api/v1/downtime_schedules/${downtimeId}`, { headers: await authHeaders(page) })
|
||||
.catch(() => undefined);
|
||||
}
|
||||
if (ruleId) {
|
||||
await page.request
|
||||
.delete(`/api/v2/rules/${ruleId}`, { headers: await authHeaders(page) })
|
||||
.catch(() => undefined);
|
||||
}
|
||||
}
|
||||
});
|
||||
98
tests/e2e/tests/alerts/downtime.spec.ts
Normal file
98
tests/e2e/tests/alerts/downtime.spec.ts
Normal file
@@ -0,0 +1,98 @@
|
||||
import { test, expect } from '../../fixtures/auth';
|
||||
|
||||
test.describe.configure({ mode: 'serial' });
|
||||
|
||||
async function authHeaders(page: import('@playwright/test').Page): Promise<Record<string, string>> {
|
||||
const token = await page.evaluate(() => localStorage.getItem('AUTH_TOKEN'));
|
||||
return { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' };
|
||||
}
|
||||
|
||||
async function deleteDowntimeIfExists(
|
||||
page: import('@playwright/test').Page,
|
||||
id: string | undefined,
|
||||
): Promise<void> {
|
||||
if (!id) return;
|
||||
await page.request
|
||||
.delete(`/api/v1/downtime_schedules/${id}`, { headers: await authHeaders(page) })
|
||||
.catch(() => undefined);
|
||||
}
|
||||
|
||||
test('TC-05 planned downtime — CRUD round-trip', async ({ authedPage: page }) => {
|
||||
const createName = 'downtime-once';
|
||||
const editedName = 'downtime-once-edited';
|
||||
let id: string | undefined;
|
||||
|
||||
try {
|
||||
// List renders (header/table visible — tenant may or may not have rows).
|
||||
await page.goto('/alerts?tab=Configuration&subTab=planned-downtime');
|
||||
await expect(page.locator('.ant-collapse, table, .ant-spin').first()).toBeVisible();
|
||||
|
||||
// Empty-form validation: name only → click Add → required field errors.
|
||||
await page.getByRole('button', { name: /new downtime/i }).click();
|
||||
const nameField = page.getByRole('textbox', { name: /name/i }).first();
|
||||
await nameField.fill(createName);
|
||||
await page.getByRole('button', { name: /add downtime schedule/i }).click();
|
||||
// Required-field validation fires on all unfilled date/select fields.
|
||||
await expect(
|
||||
page.getByText(/please enter (starts from|ends on|timezone)/i).first(),
|
||||
).toBeVisible();
|
||||
await page.keyboard.press('Escape');
|
||||
|
||||
// Create via direct API. The Ant DatePicker calendar-cell path is
|
||||
// historically brittle across timezone and cells-in-view indices
|
||||
// (legacy run-4 documented this); this test's focus is the CRUD
|
||||
// round-trip + list UI, not the picker UX, so we POST directly.
|
||||
const now = Date.now();
|
||||
const createResp = await page.request.post('/api/v1/downtime_schedules', {
|
||||
data: {
|
||||
name: createName,
|
||||
description: 'downtime.spec.ts',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
},
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
expect(createResp.status()).toBeLessThan(300);
|
||||
const createJson = await createResp.json();
|
||||
id = (createJson.data?.id ?? createJson.id) as string;
|
||||
|
||||
await page.goto('/alerts?tab=Configuration&subTab=planned-downtime');
|
||||
await expect(page.getByText(createName, { exact: false })).toBeVisible();
|
||||
|
||||
// Edit via direct API (pencil icon is a lucide SVG with no testid
|
||||
// and its click-area is historically racey under test-runner speed).
|
||||
const editResp = await page.request.put(`/api/v1/downtime_schedules/${id}`, {
|
||||
data: {
|
||||
name: editedName,
|
||||
description: 'downtime.spec.ts-edited',
|
||||
schedule: {
|
||||
timezone: 'UTC',
|
||||
startTime: new Date(now).toISOString(),
|
||||
endTime: new Date(now + 24 * 60 * 60 * 1000).toISOString(),
|
||||
recurrence: null,
|
||||
},
|
||||
alertIds: [],
|
||||
},
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
expect(editResp.status()).toBeLessThan(300);
|
||||
await page.reload();
|
||||
await expect(page.getByText(editedName, { exact: false })).toBeVisible();
|
||||
|
||||
// Delete via direct API; verify the UI reflects it.
|
||||
const delResp = await page.request.delete(`/api/v1/downtime_schedules/${id}`, {
|
||||
headers: await authHeaders(page),
|
||||
});
|
||||
expect(delResp.status()).toBeLessThan(300);
|
||||
id = undefined;
|
||||
await page.reload();
|
||||
await expect(page.getByText(editedName, { exact: false })).toHaveCount(0);
|
||||
} finally {
|
||||
await deleteDowntimeIfExists(page, id);
|
||||
}
|
||||
});
|
||||
23
tests/e2e/tsconfig.json
Normal file
23
tests/e2e/tsconfig.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2020",
|
||||
"module": "commonjs",
|
||||
"moduleResolution": "bundler",
|
||||
"lib": ["ES2020"],
|
||||
"strict": true,
|
||||
"esModuleInterop": true,
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"resolveJsonModule": true,
|
||||
"types": ["node", "@playwright/test"],
|
||||
"paths": {
|
||||
"@tests/*": ["./tests/*"],
|
||||
"@utils/*": ["./utils/*"],
|
||||
"@specs/*": ["./specs/*"]
|
||||
},
|
||||
"outDir": "./dist",
|
||||
"rootDir": "."
|
||||
},
|
||||
"include": ["tests/**/*.ts", "utils/**/*.ts", "playwright.config.ts"],
|
||||
"exclude": ["node_modules", "dist"]
|
||||
}
|
||||
1480
tests/e2e/yarn.lock
Normal file
1480
tests/e2e/yarn.lock
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,18 +1,118 @@
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import List
|
||||
from typing import Callable, List
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.fs import get_testdata_file_path
|
||||
from fixtures.logger import setup_logger
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.metrics import Metrics
|
||||
from fixtures.traces import Traces
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(name="create_alert_rule", scope="function")
|
||||
def create_alert_rule(
|
||||
signoz: types.SigNoz, get_token: Callable[[str, str], str]
|
||||
) -> Callable[[dict], str]:
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
rule_ids = []
|
||||
|
||||
def _create_alert_rule(rule_data: dict) -> str:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/rules"),
|
||||
json=rule_data,
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Failed to create rule, api returned {response.status_code} with response: {response.text}"
|
||||
rule_id = response.json()["data"]["id"]
|
||||
rule_ids.append(rule_id)
|
||||
return rule_id
|
||||
|
||||
def _delete_alert_rule(rule_id: str):
|
||||
logger.info("Deleting rule: %s", {"rule_id": rule_id})
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(f"/api/v1/rules/{rule_id}"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
if response.status_code != HTTPStatus.OK:
|
||||
raise Exception( # pylint: disable=broad-exception-raised
|
||||
f"Failed to delete rule, api returned {response.status_code} with response: {response.text}"
|
||||
)
|
||||
|
||||
yield _create_alert_rule
|
||||
# delete the rule on cleanup
|
||||
for rule_id in rule_ids:
|
||||
try:
|
||||
_delete_alert_rule(rule_id)
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
logger.error("Error deleting rule: %s", {"rule_id": rule_id, "error": e})
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_alert_data", scope="function")
|
||||
def insert_alert_data(
|
||||
insert_metrics: Callable[[List[Metrics]], None],
|
||||
insert_traces: Callable[[List[Traces]], None],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> Callable[[List[types.AlertData]], None]:
|
||||
|
||||
def _insert_alert_data(
|
||||
alert_data_items: List[types.AlertData],
|
||||
base_time: datetime = None,
|
||||
) -> None:
|
||||
|
||||
metrics: List[Metrics] = []
|
||||
traces: List[Traces] = []
|
||||
logs: List[Logs] = []
|
||||
|
||||
now = base_time or datetime.now(tz=timezone.utc).replace(
|
||||
second=0, microsecond=0
|
||||
)
|
||||
|
||||
for data_item in alert_data_items:
|
||||
if data_item.type == "metrics":
|
||||
_metrics = Metrics.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
metrics.extend(_metrics)
|
||||
elif data_item.type == "traces":
|
||||
_traces = Traces.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
traces.extend(_traces)
|
||||
elif data_item.type == "logs":
|
||||
_logs = Logs.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
logs.extend(_logs)
|
||||
|
||||
# Add data to ClickHouse if any data is present
|
||||
if len(metrics) > 0:
|
||||
insert_metrics(metrics)
|
||||
if len(traces) > 0:
|
||||
insert_traces(traces)
|
||||
if len(logs) > 0:
|
||||
insert_logs(logs)
|
||||
|
||||
yield _insert_alert_data
|
||||
|
||||
|
||||
def collect_webhook_firing_alerts(
|
||||
webhook_test_container: types.TestContainerDocker, notification_channel_name: str
|
||||
) -> List[types.FiringAlert]:
|
||||
438
tests/fixtures/auth.py
vendored
Normal file
438
tests/fixtures/auth.py
vendored
Normal file
@@ -0,0 +1,438 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, Dict, List, Tuple
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
from wiremock.client import Mappings
|
||||
from wiremock.constants import Config
|
||||
from wiremock.resources.mappings import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
USER_ADMIN_NAME = "admin"
|
||||
USER_ADMIN_EMAIL = "admin@integration.test"
|
||||
USER_ADMIN_PASSWORD = "password123Z$"
|
||||
|
||||
USER_EDITOR_NAME = "editor"
|
||||
USER_EDITOR_EMAIL = "editor@integration.test"
|
||||
USER_EDITOR_PASSWORD = "password123Z$"
|
||||
|
||||
USER_VIEWER_NAME = "viewer"
|
||||
USER_VIEWER_EMAIL = "viewer@integration.test"
|
||||
USER_VIEWER_PASSWORD = "password123Z$"
|
||||
|
||||
USERS_BASE = "/api/v2/users"
|
||||
|
||||
|
||||
@pytest.fixture(name="create_user_admin", scope="package")
|
||||
def create_user_admin(
|
||||
signoz: types.SigNoz, request: pytest.FixtureRequest, pytestconfig: pytest.Config
|
||||
) -> types.Operation:
|
||||
def create() -> None:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/register"),
|
||||
json={
|
||||
"name": USER_ADMIN_NAME,
|
||||
"orgName": "",
|
||||
"email": USER_ADMIN_EMAIL,
|
||||
"password": USER_ADMIN_PASSWORD,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
|
||||
return types.Operation(name="create_user_admin")
|
||||
|
||||
def delete(_: types.Operation) -> None:
|
||||
pass
|
||||
|
||||
def restore(cache: dict) -> types.Operation:
|
||||
return types.Operation(name=cache["name"])
|
||||
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"create_user_admin",
|
||||
lambda: types.Operation(name=""),
|
||||
create,
|
||||
delete,
|
||||
restore,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="get_session_context", scope="function")
|
||||
def get_session_context(signoz: types.SigNoz) -> Callable[[str, str], str]:
|
||||
def _get_session_context(email: str) -> str:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": email,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
return response.json()["data"]
|
||||
|
||||
return _get_session_context
|
||||
|
||||
|
||||
@pytest.fixture(name="get_token", scope="function")
|
||||
def get_token(signoz: types.SigNoz) -> Callable[[str, str], str]:
|
||||
def _get_token(email: str, password: str) -> str:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": email,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
org_id = response.json()["data"]["orgs"][0]["id"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/email_password"),
|
||||
json={
|
||||
"email": email,
|
||||
"password": password,
|
||||
"orgId": org_id,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
return response.json()["data"]["accessToken"]
|
||||
|
||||
return _get_token
|
||||
|
||||
|
||||
@pytest.fixture(name="get_tokens", scope="function")
|
||||
def get_tokens(signoz: types.SigNoz) -> Callable[[str, str], Tuple[str, str]]:
|
||||
def _get_tokens(email: str, password: str) -> str:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": email,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
org_id = response.json()["data"]["orgs"][0]["id"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/email_password"),
|
||||
json={
|
||||
"email": email,
|
||||
"password": password,
|
||||
"orgId": org_id,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
access_token = response.json()["data"]["accessToken"]
|
||||
refresh_token = response.json()["data"]["refreshToken"]
|
||||
return access_token, refresh_token
|
||||
|
||||
return _get_tokens
|
||||
|
||||
|
||||
@pytest.fixture(name="apply_license", scope="package")
|
||||
def apply_license(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: types.Operation, # pylint: disable=unused-argument,redefined-outer-name
|
||||
request: pytest.FixtureRequest,
|
||||
pytestconfig: pytest.Config,
|
||||
) -> types.Operation:
|
||||
"""Stub Zeus license-lookup, then POST /api/v3/licenses so the BE flips
|
||||
to ENTERPRISE. Package-scoped so an e2e bootstrap can pull it in and
|
||||
every spec inherits the licensed state."""
|
||||
|
||||
def create() -> types.Operation:
|
||||
Config.base_url = signoz.zeus.host_configs["8080"].get("/__admin")
|
||||
Mappings.create_mapping(
|
||||
mapping=Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/licenses/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"id": "0196360e-90cd-7a74-8313-1aa815ce2a67",
|
||||
"key": "secret-key",
|
||||
"valid_from": 1732146923,
|
||||
"valid_until": -1,
|
||||
"status": "VALID",
|
||||
"state": "EVALUATING",
|
||||
"plan": {"name": "ENTERPRISE"},
|
||||
"platform": "CLOUD",
|
||||
"features": [],
|
||||
"event_queue": {},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
)
|
||||
|
||||
ctx_resp = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": USER_ADMIN_EMAIL,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
assert ctx_resp.status_code == HTTPStatus.OK
|
||||
org_id = ctx_resp.json()["data"]["orgs"][0]["id"]
|
||||
|
||||
login_resp = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/email_password"),
|
||||
json={
|
||||
"email": USER_ADMIN_EMAIL,
|
||||
"password": USER_ADMIN_PASSWORD,
|
||||
"orgId": org_id,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
assert login_resp.status_code == HTTPStatus.OK
|
||||
access_token = login_resp.json()["data"]["accessToken"]
|
||||
|
||||
resp = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v3/licenses"),
|
||||
json={"key": "secret-key"},
|
||||
headers={"Authorization": f"Bearer {access_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
# 202 = applied, 409 = already applied. Either is a success.
|
||||
assert resp.status_code in (HTTPStatus.ACCEPTED, HTTPStatus.CONFLICT)
|
||||
|
||||
# The ENTERPRISE license flips on the `onboarding` feature which
|
||||
# redirects first-time admins to the onboarding questionnaire on
|
||||
# every navigation. Mark the org's onboarding preference complete
|
||||
# so specs can navigate directly to the feature under test.
|
||||
pref_resp = requests.put(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
"/api/v1/org/preferences/org_onboarding"
|
||||
),
|
||||
json={"value": True},
|
||||
headers={"Authorization": f"Bearer {access_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert pref_resp.status_code in (
|
||||
HTTPStatus.OK,
|
||||
HTTPStatus.NO_CONTENT,
|
||||
)
|
||||
return types.Operation(name="apply_license")
|
||||
|
||||
def delete(_: types.Operation) -> None:
|
||||
pass
|
||||
|
||||
def restore(cache: dict) -> types.Operation:
|
||||
return types.Operation(name=cache["name"])
|
||||
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"apply_license",
|
||||
lambda: types.Operation(name=""),
|
||||
create,
|
||||
delete,
|
||||
restore,
|
||||
)
|
||||
|
||||
|
||||
# This is not a fixture purposefully, we just want to add a license to the signoz instance.
|
||||
# This is also idempotent in nature.
|
||||
def add_license(
|
||||
signoz: types.SigNoz,
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, List[Mapping]], None],
|
||||
get_token: Callable[[str, str], str], # pylint: disable=redefined-outer-name
|
||||
) -> None:
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/licenses/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"id": "0196360e-90cd-7a74-8313-1aa815ce2a67",
|
||||
"key": "secret-key",
|
||||
"valid_from": 1732146923,
|
||||
"valid_until": -1,
|
||||
"status": "VALID",
|
||||
"state": "EVALUATING",
|
||||
"plan": {
|
||||
"name": "ENTERPRISE",
|
||||
},
|
||||
"platform": "CLOUD",
|
||||
"features": [],
|
||||
"event_queue": {},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
access_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
response = requests.post(
|
||||
url=signoz.self.host_configs["8080"].get("/api/v3/licenses"),
|
||||
json={"key": "secret-key"},
|
||||
headers={"Authorization": "Bearer " + access_token},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
if response.status_code == HTTPStatus.CONFLICT:
|
||||
return
|
||||
|
||||
assert response.status_code == HTTPStatus.ACCEPTED
|
||||
|
||||
response = requests.post(
|
||||
url=signoz.zeus.host_configs["8080"].get("/__admin/requests/count"),
|
||||
json={"method": "GET", "url": "/v2/licenses/me"},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.json()["count"] == 1
|
||||
|
||||
|
||||
def create_active_user(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
email: str,
|
||||
role: str,
|
||||
password: str,
|
||||
name: str = "",
|
||||
) -> str:
|
||||
"""Invite a user and activate via resetPassword. Returns user ID."""
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/invite"),
|
||||
json={"email": email, "role": role, "name": name},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.CREATED, response.text
|
||||
invited_user = response.json()["data"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/resetPassword"),
|
||||
json={"password": password, "token": invited_user["token"]},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
return invited_user["id"]
|
||||
|
||||
|
||||
def find_user_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email from the user list. Raises AssertionError if not found."""
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(USERS_BASE),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
user = next((u for u in response.json()["data"] if u["email"] == email), None)
|
||||
assert user is not None, f"User with email '{email}' not found"
|
||||
return user
|
||||
|
||||
|
||||
def find_user_with_roles_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email and return UserWithRoles (user fields + userRoles).
|
||||
|
||||
Raises AssertionError if the user is not found.
|
||||
"""
|
||||
user = find_user_by_email(signoz, token, email)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user['id']}"),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
return response.json()["data"]
|
||||
|
||||
|
||||
def assert_user_has_role(data: Dict, role_name: str) -> None:
|
||||
"""Assert that a UserWithRoles response contains the expected managed role."""
|
||||
role_names = {ur["role"]["name"] for ur in data.get("userRoles", [])}
|
||||
assert role_name in role_names, f"Expected role '{role_name}' in {role_names}"
|
||||
|
||||
|
||||
def change_user_role(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
user_id: str,
|
||||
old_role: str,
|
||||
new_role: str,
|
||||
) -> None:
|
||||
"""Change a user's role (remove old, assign new).
|
||||
|
||||
Role names should be managed role names (e.g. signoz-editor).
|
||||
"""
|
||||
# Get current roles to find the old role's ID
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
roles = response.json()["data"]
|
||||
|
||||
old_role_entry = next((r for r in roles if r["name"] == old_role), None)
|
||||
assert old_role_entry is not None, f"User does not have role '{old_role}'"
|
||||
|
||||
# Remove old role
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"{USERS_BASE}/{user_id}/roles/{old_role_entry['id']}"
|
||||
),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
# Assign new role
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
json={"name": new_role},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
@@ -10,7 +10,7 @@ import pytest
|
||||
from testcontainers.clickhouse import ClickHouseContainer
|
||||
from testcontainers.core.container import Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -256,7 +256,7 @@ def clickhouse(
|
||||
env=env,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"clickhouse",
|
||||
@@ -5,6 +5,13 @@ from typing import Callable
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
from wiremock.client import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
@@ -153,3 +160,140 @@ def create_cloud_integration_account(
|
||||
logger.info("Cleaned up test account: %s", account_id)
|
||||
except Exception as exc: # pylint: disable=broad-except
|
||||
logger.info("Post-test delete cleanup failed: %s", exc)
|
||||
|
||||
|
||||
def deprecated_simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/agent-check-in"
|
||||
|
||||
checkin_payload = {
|
||||
"account_id": account_id,
|
||||
"cloud_account_id": cloud_account_id,
|
||||
"data": {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def setup_create_account_mocks(
|
||||
signoz: types.SigNoz,
|
||||
make_http_mocks: Callable,
|
||||
) -> None:
|
||||
"""Set up Zeus and Gateway mocks required by the CreateAccount endpoint."""
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
make_http_mocks(
|
||||
signoz.gateway,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v1/workspaces/me/keys/search?name=aws-integration&page=1&per_page=10",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": [],
|
||||
"_pagination": {"page": 1, "per_page": 10, "total": 0},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.POST,
|
||||
url="/v1/workspaces/me/keys",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "aws-integration",
|
||||
"value": "test-ingestion-key-123456",
|
||||
},
|
||||
"error": "",
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
def simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
data: dict | None = None,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud_integrations/{cloud_provider}/accounts/check_in"
|
||||
|
||||
checkin_payload = {
|
||||
"cloudIntegrationId": account_id,
|
||||
"providerAccountId": cloud_account_id,
|
||||
"data": data or {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
79
tests/fixtures/dashboards.py
vendored
Normal file
79
tests/fixtures/dashboards.py
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
def create_dashboard(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
payload: Dict,
|
||||
*,
|
||||
timeout: int = 5,
|
||||
) -> str:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/dashboards"),
|
||||
json=payload,
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=timeout,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.CREATED, (
|
||||
f"create_dashboard failed: {response.status_code} {response.text}"
|
||||
)
|
||||
return response.json()["data"]["id"]
|
||||
|
||||
|
||||
def list_dashboards(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
*,
|
||||
timeout: int = 5,
|
||||
) -> List[Dict]:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/dashboards"),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=timeout,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, (
|
||||
f"list_dashboards failed: {response.status_code} {response.text}"
|
||||
)
|
||||
return response.json().get("data", []) or []
|
||||
|
||||
|
||||
def find_dashboard_by_title(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
title: str,
|
||||
) -> Optional[Dict]:
|
||||
for dashboard in list_dashboards(signoz, token):
|
||||
data = dashboard.get("data") or dashboard
|
||||
if data.get("title") == title:
|
||||
return dashboard
|
||||
return None
|
||||
|
||||
|
||||
def upsert_dashboard(
|
||||
signoz: types.SigNoz,
|
||||
token: str,
|
||||
payload: Dict,
|
||||
) -> str:
|
||||
"""
|
||||
Idempotent create. Looks up by title; if present, returns the existing
|
||||
dashboard id. Intended for warm-backend seed loops under `--reuse`.
|
||||
"""
|
||||
title = payload.get("title")
|
||||
if title:
|
||||
existing = find_dashboard_by_title(signoz, token, title)
|
||||
if existing is not None:
|
||||
dashboard_id = existing.get("id") or (existing.get("data") or {}).get("id")
|
||||
logger.info(
|
||||
"dashboard already present, skipping: %s",
|
||||
{"title": title, "id": dashboard_id},
|
||||
)
|
||||
return dashboard_id
|
||||
return create_dashboard(signoz, token, payload)
|
||||
@@ -1,3 +1,4 @@
|
||||
import os
|
||||
from typing import Any, Generator
|
||||
|
||||
import pytest
|
||||
@@ -13,3 +14,8 @@ def tmpfs(
|
||||
return tmp_path_factory.mktemp(basename)
|
||||
|
||||
yield _tmp
|
||||
|
||||
|
||||
def get_testdata_file_path(file: str) -> str:
|
||||
testdata_dir = os.path.join(os.path.dirname(__file__), "..", "testdata")
|
||||
return os.path.join(testdata_dir, file)
|
||||
@@ -12,7 +12,7 @@ from wiremock.client import (
|
||||
from wiremock.constants import Config
|
||||
from wiremock.testing.testcontainer import WireMockContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -63,7 +63,7 @@ def zeus(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"zeus",
|
||||
@@ -120,7 +120,7 @@ def gateway(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"gateway",
|
||||
@@ -11,7 +11,7 @@ from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.idp import IDP_ROOT_PASSWORD, IDP_ROOT_USERNAME
|
||||
from fixtures.keycloak import IDP_ROOT_PASSWORD, IDP_ROOT_USERNAME
|
||||
|
||||
|
||||
@pytest.fixture(name="create_saml_client", scope="function")
|
||||
@@ -4,7 +4,7 @@ import pytest
|
||||
from testcontainers.core.container import Network
|
||||
from testcontainers.keycloak import KeycloakContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -80,7 +80,7 @@ def idp(
|
||||
container=container,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"idp",
|
||||
@@ -11,7 +11,7 @@ from ksuid import KsuidMs
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.fingerprint import LogsOrTracesFingerprint
|
||||
from fixtures.utils import parse_timestamp
|
||||
from fixtures.time import parse_timestamp
|
||||
|
||||
|
||||
class LogsResource(ABC):
|
||||
@@ -391,112 +391,126 @@ class Logs(ABC):
|
||||
return logs
|
||||
|
||||
|
||||
def insert_logs_to_clickhouse(conn, logs: List[Logs]) -> None:
|
||||
"""
|
||||
Insert logs into ClickHouse tables following the same logic as the Go exporter.
|
||||
Handles insertion into:
|
||||
- distributed_logs_v2 (main logs table)
|
||||
- distributed_logs_v2_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_logs_attribute_keys (attribute keys)
|
||||
- distributed_logs_resource_keys (resource keys)
|
||||
|
||||
Pure function so the seeder container can reuse the exact insert path
|
||||
used by the pytest fixture. `conn` is a clickhouse-connect Client.
|
||||
"""
|
||||
resources: List[LogsResource] = []
|
||||
for log in logs:
|
||||
resources.extend(log.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
column_names=[
|
||||
"labels",
|
||||
"fingerprint",
|
||||
"seen_at_ts_bucket_start",
|
||||
],
|
||||
)
|
||||
|
||||
tag_attributes: List[LogsTagAttributes] = []
|
||||
for log in logs:
|
||||
tag_attributes.extend(log.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
attribute_keys.extend(log.attribute_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_attribute_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
resource_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
resource_keys.extend(log.resource_keys)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_resource_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2",
|
||||
data=[log.np_arr() for log in logs],
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"observed_timestamp",
|
||||
"id",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_flags",
|
||||
"severity_text",
|
||||
"severity_number",
|
||||
"body",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"scope_name",
|
||||
"scope_version",
|
||||
"scope_string",
|
||||
"resource",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
_LOGS_TABLES_TO_TRUNCATE = [
|
||||
"logs_v2",
|
||||
"logs_v2_resource",
|
||||
"tag_attributes_v2",
|
||||
"logs_attribute_keys",
|
||||
"logs_resource_keys",
|
||||
]
|
||||
|
||||
|
||||
def truncate_logs_tables(conn, cluster: str) -> None:
|
||||
"""Truncate all logs tables. Used by the pytest fixture teardown and by
|
||||
the seeder's DELETE /telemetry/logs endpoint."""
|
||||
for table in _LOGS_TABLES_TO_TRUNCATE:
|
||||
conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_logs", scope="function")
|
||||
def insert_logs(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
) -> Generator[Callable[[List[Logs]], None], Any, None]:
|
||||
def _insert_logs(logs: List[Logs]) -> None:
|
||||
"""
|
||||
Insert logs into ClickHouse tables following the same logic as the Go exporter.
|
||||
This function handles insertion into multiple tables:
|
||||
- distributed_logs_v2 (main logs table)
|
||||
- distributed_logs_v2_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_logs_attribute_keys (attribute keys)
|
||||
- distributed_logs_resource_keys (resource keys)
|
||||
"""
|
||||
resources: List[LogsResource] = []
|
||||
for log in logs:
|
||||
resources.extend(log.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
column_names=[
|
||||
"labels",
|
||||
"fingerprint",
|
||||
"seen_at_ts_bucket_start",
|
||||
],
|
||||
)
|
||||
|
||||
tag_attributes: List[LogsTagAttributes] = []
|
||||
for log in logs:
|
||||
tag_attributes.extend(log.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
attribute_keys.extend(log.attribute_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_attribute_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
resource_keys: List[LogsResourceOrAttributeKeys] = []
|
||||
for log in logs:
|
||||
resource_keys.extend(log.resource_keys)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_resource_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_logs",
|
||||
table="distributed_logs_v2",
|
||||
data=[log.np_arr() for log in logs],
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"observed_timestamp",
|
||||
"id",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_flags",
|
||||
"severity_text",
|
||||
"severity_number",
|
||||
"body",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"scope_name",
|
||||
"scope_version",
|
||||
"scope_string",
|
||||
"resource",
|
||||
],
|
||||
)
|
||||
insert_logs_to_clickhouse(clickhouse.conn, logs)
|
||||
|
||||
yield _insert_logs
|
||||
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_v2_resource ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.tag_attributes_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_attribute_keys ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_logs.logs_resource_keys ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
truncate_logs_tables(
|
||||
clickhouse.conn,
|
||||
clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"],
|
||||
)
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import numpy as np
|
||||
import pytest
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.utils import parse_timestamp
|
||||
from fixtures.time import parse_timestamp
|
||||
|
||||
|
||||
class MetricsTimeSeries(ABC):
|
||||
@@ -417,151 +417,168 @@ class Metrics(ABC):
|
||||
return metrics
|
||||
|
||||
|
||||
def insert_metrics_to_clickhouse(conn, metrics: List[Metrics]) -> None:
|
||||
"""
|
||||
Insert metrics into ClickHouse tables.
|
||||
Handles insertion into:
|
||||
- distributed_time_series_v4 (time series metadata)
|
||||
- distributed_samples_v4 (actual sample values)
|
||||
- distributed_metadata (metric attribute metadata)
|
||||
|
||||
Pure function so the seeder container can reuse the exact insert path
|
||||
used by the pytest fixture. `conn` is a clickhouse-connect Client.
|
||||
"""
|
||||
time_series_map: dict[int, MetricsTimeSeries] = {}
|
||||
for metric in metrics:
|
||||
fp = int(metric.time_series.fingerprint)
|
||||
if fp not in time_series_map:
|
||||
time_series_map[fp] = metric.time_series
|
||||
|
||||
if len(time_series_map) > 0:
|
||||
conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_time_series_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"labels",
|
||||
"attrs",
|
||||
"scope_attrs",
|
||||
"resource_attrs",
|
||||
"__normalized",
|
||||
],
|
||||
data=[ts.to_row() for ts in time_series_map.values()],
|
||||
)
|
||||
|
||||
samples = [metric.sample for metric in metrics]
|
||||
if len(samples) > 0:
|
||||
conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_samples_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"value",
|
||||
"flags",
|
||||
],
|
||||
data=[sample.to_row() for sample in samples],
|
||||
)
|
||||
|
||||
# (metric_name, attr_type, attr_name, attr_value) -> MetricsMetadata
|
||||
metadata_map: dict[tuple, MetricsMetadata] = {}
|
||||
for metric in metrics:
|
||||
ts = metric.time_series
|
||||
for attr_name, attr_value in metric.labels.items():
|
||||
key = (ts.metric_name, "point", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="point",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.resource_attrs.items():
|
||||
key = (ts.metric_name, "resource", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="resource",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.scope_attrs.items():
|
||||
key = (ts.metric_name, "scope", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="scope",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
|
||||
if len(metadata_map) > 0:
|
||||
conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_metadata",
|
||||
column_names=[
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"attr_name",
|
||||
"attr_type",
|
||||
"attr_datatype",
|
||||
"attr_string_value",
|
||||
"first_reported_unix_milli",
|
||||
"last_reported_unix_milli",
|
||||
],
|
||||
data=[m.to_row() for m in metadata_map.values()],
|
||||
)
|
||||
|
||||
|
||||
_METRICS_TABLES_TO_TRUNCATE = [
|
||||
"time_series_v4",
|
||||
"samples_v4",
|
||||
"exp_hist",
|
||||
"metadata",
|
||||
]
|
||||
|
||||
|
||||
def truncate_metrics_tables(conn, cluster: str) -> None:
|
||||
"""Truncate all metrics tables. Used by the pytest fixture teardown and by
|
||||
the seeder's DELETE /telemetry/metrics endpoint."""
|
||||
for table in _METRICS_TABLES_TO_TRUNCATE:
|
||||
conn.query(
|
||||
f"TRUNCATE TABLE signoz_metrics.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_metrics", scope="function")
|
||||
def insert_metrics(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
) -> Generator[Callable[[List[Metrics]], None], Any, None]:
|
||||
def _insert_metrics(metrics: List[Metrics]) -> None:
|
||||
"""
|
||||
Insert metrics into ClickHouse tables.
|
||||
This function handles insertion into:
|
||||
- distributed_time_series_v4 (time series metadata)
|
||||
- distributed_samples_v4 (actual sample values)
|
||||
- distributed_metadata (metric attribute metadata)
|
||||
"""
|
||||
time_series_map: dict[int, MetricsTimeSeries] = {}
|
||||
for metric in metrics:
|
||||
fp = int(metric.time_series.fingerprint)
|
||||
if fp not in time_series_map:
|
||||
time_series_map[fp] = metric.time_series
|
||||
|
||||
if len(time_series_map) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_time_series_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"labels",
|
||||
"attrs",
|
||||
"scope_attrs",
|
||||
"resource_attrs",
|
||||
"__normalized",
|
||||
],
|
||||
data=[ts.to_row() for ts in time_series_map.values()],
|
||||
)
|
||||
|
||||
samples = [metric.sample for metric in metrics]
|
||||
if len(samples) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_samples_v4",
|
||||
column_names=[
|
||||
"env",
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"fingerprint",
|
||||
"unix_milli",
|
||||
"value",
|
||||
"flags",
|
||||
],
|
||||
data=[sample.to_row() for sample in samples],
|
||||
)
|
||||
|
||||
# (metric_name, attr_type, attr_name, attr_value) -> MetricsMetadata
|
||||
metadata_map: dict[tuple, MetricsMetadata] = {}
|
||||
for metric in metrics:
|
||||
ts = metric.time_series
|
||||
for attr_name, attr_value in metric.labels.items():
|
||||
key = (ts.metric_name, "point", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="point",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.resource_attrs.items():
|
||||
key = (ts.metric_name, "resource", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="resource",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
for attr_name, attr_value in ts.scope_attrs.items():
|
||||
key = (ts.metric_name, "scope", attr_name, str(attr_value))
|
||||
if key not in metadata_map:
|
||||
metadata_map[key] = MetricsMetadata(
|
||||
metric_name=ts.metric_name,
|
||||
attr_name=attr_name,
|
||||
attr_type="scope",
|
||||
attr_datatype="String",
|
||||
attr_string_value=str(attr_value),
|
||||
timestamp=metric.timestamp,
|
||||
temporality=ts.temporality,
|
||||
description=ts.description,
|
||||
unit=ts.unit,
|
||||
type_=ts.type,
|
||||
is_monotonic=ts.is_monotonic,
|
||||
)
|
||||
|
||||
if len(metadata_map) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metrics",
|
||||
table="distributed_metadata",
|
||||
column_names=[
|
||||
"temporality",
|
||||
"metric_name",
|
||||
"description",
|
||||
"unit",
|
||||
"type",
|
||||
"is_monotonic",
|
||||
"attr_name",
|
||||
"attr_type",
|
||||
"attr_datatype",
|
||||
"attr_string_value",
|
||||
"first_reported_unix_milli",
|
||||
"last_reported_unix_milli",
|
||||
],
|
||||
data=[m.to_row() for m in metadata_map.values()],
|
||||
)
|
||||
insert_metrics_to_clickhouse(clickhouse.conn, metrics)
|
||||
|
||||
yield _insert_metrics
|
||||
|
||||
cluster = clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"]
|
||||
tables_to_truncate = [
|
||||
"time_series_v4",
|
||||
"samples_v4",
|
||||
"exp_hist",
|
||||
"metadata",
|
||||
]
|
||||
for table in tables_to_truncate:
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_metrics.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
truncate_metrics_tables(
|
||||
clickhouse.conn,
|
||||
clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"],
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="remove_metrics_ttl_and_storage_settings", scope="function")
|
||||
@@ -2,7 +2,7 @@ import docker
|
||||
import pytest
|
||||
from testcontainers.core.container import Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -67,7 +67,7 @@ def migrator(
|
||||
def restore(cache: dict) -> types.Operation:
|
||||
return types.Operation(name=cache["name"])
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"migrator",
|
||||
@@ -3,7 +3,7 @@ import docker.errors
|
||||
import pytest
|
||||
from testcontainers.core.network import Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -37,7 +37,7 @@ def network(
|
||||
nw = client.networks.get(network_id=existing.get("id"))
|
||||
return types.Network(id=nw.id, name=nw.name)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"network",
|
||||
@@ -8,7 +8,7 @@ import requests
|
||||
from testcontainers.core.container import Network
|
||||
from wiremock.testing.testcontainer import WireMockContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
@@ -60,7 +60,7 @@ def notification_channel(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"notification_channel",
|
||||
@@ -5,7 +5,7 @@ from sqlalchemy import create_engine, sql
|
||||
from testcontainers.core.container import Network
|
||||
from testcontainers.postgres import PostgresContainer
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -97,7 +97,7 @@ def postgres(
|
||||
env=env,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"postgres",
|
||||
@@ -1,10 +1,13 @@
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
|
||||
|
||||
DEFAULT_STEP_INTERVAL = 60 # seconds
|
||||
DEFAULT_TOLERANCE = 1e-9
|
||||
@@ -583,3 +586,251 @@ def assert_scalar_column_order(
|
||||
f"{context}: Column {column_index} order mismatch. "
|
||||
f"Expected {expected_values}, got {actual_values}"
|
||||
)
|
||||
|
||||
|
||||
def format_timestamp(dt: datetime) -> str:
|
||||
"""
|
||||
Format a datetime object to match the API's timestamp format.
|
||||
The API returns timestamps with minimal fractional seconds precision.
|
||||
Example: 2026-02-03T20:54:56.5Z for 500000 microseconds
|
||||
"""
|
||||
base_str = dt.strftime("%Y-%m-%dT%H:%M:%S")
|
||||
if dt.microsecond:
|
||||
# Convert microseconds to fractional seconds and strip trailing zeros
|
||||
fractional = f"{dt.microsecond / 1000000:.6f}"[2:].rstrip("0")
|
||||
return f"{base_str}.{fractional}Z"
|
||||
return f"{base_str}Z"
|
||||
|
||||
|
||||
def assert_identical_query_response(
|
||||
response1: requests.Response, response2: requests.Response
|
||||
) -> None:
|
||||
"""
|
||||
Assert that two query responses are identical in status and data.
|
||||
"""
|
||||
assert response1.status_code == response2.status_code, "Status codes do not match"
|
||||
if response1.status_code == HTTPStatus.OK:
|
||||
assert (
|
||||
response1.json()["status"] == response2.json()["status"]
|
||||
), "Response statuses do not match"
|
||||
assert (
|
||||
response1.json()["data"]["data"]["results"]
|
||||
== response2.json()["data"]["data"]["results"]
|
||||
), "Response data do not match"
|
||||
|
||||
|
||||
def generate_logs_with_corrupt_metadata() -> List[Logs]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'severity_text', 'severity_number' and 'body' fields in metadata
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
body="POST /integration request received",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"severity_text": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
trace_id="1",
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
body="SELECT query executed",
|
||||
severity_text="DEBUG",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"severity_number": "corrupt_data",
|
||||
"id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_id": "2",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=2),
|
||||
body="HTTP PATCH failed with 404",
|
||||
severity_text="WARN",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"body": "corrupt_data",
|
||||
"trace_id": "3",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
body="{'trace_id': '4'}",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"body": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def generate_traces_with_corrupt_metadata() -> List[Traces]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'trace_id' and 'duration_nano' fields in metadata
|
||||
"""
|
||||
http_service_trace_id = TraceIdGenerator.trace_id()
|
||||
http_service_span_id = TraceIdGenerator.span_id()
|
||||
http_service_db_span_id = TraceIdGenerator.span_id()
|
||||
http_service_patch_span_id = TraceIdGenerator.span_id()
|
||||
topic_service_trace_id = TraceIdGenerator.trace_id()
|
||||
topic_service_span_id = TraceIdGenerator.span_id()
|
||||
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
duration=timedelta(seconds=3),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_span_id,
|
||||
parent_span_id="",
|
||||
name="POST /integration",
|
||||
kind=TracesKind.SPAN_KIND_SERVER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"trace_id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3.5),
|
||||
duration=timedelta(seconds=5),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_db_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="SELECT",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_d": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
duration=timedelta(seconds=1),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_patch_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="HTTP PATCH",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"duration_nano": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
duration=timedelta(seconds=4),
|
||||
trace_id=topic_service_trace_id,
|
||||
span_id=topic_service_span_id,
|
||||
parent_span_id="",
|
||||
name="topic publish",
|
||||
kind=TracesKind.SPAN_KIND_PRODUCER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"duration_nano": "corrupt_data",
|
||||
"id": 1,
|
||||
},
|
||||
),
|
||||
]
|
||||
116
tests/fixtures/seeder.py
vendored
Normal file
116
tests/fixtures/seeder.py
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
import time
|
||||
from http import HTTPStatus
|
||||
from pathlib import Path
|
||||
|
||||
import docker
|
||||
import docker.errors
|
||||
import pytest
|
||||
import requests
|
||||
from testcontainers.core.container import DockerContainer, Network
|
||||
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
# Build context is tests/ so `fixtures/` is importable inside the container
|
||||
# under /app/fixtures. This file sits at tests/fixtures/seeder.py, hence
|
||||
# parents[1] = tests/.
|
||||
_TESTS_ROOT = Path(__file__).resolve().parents[1]
|
||||
|
||||
|
||||
@pytest.fixture(name="seeder", scope="package")
|
||||
def seeder(
|
||||
network: Network,
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
request: pytest.FixtureRequest,
|
||||
pytestconfig: pytest.Config,
|
||||
) -> types.TestContainerDocker:
|
||||
"""
|
||||
HTTP seeder fixture — a Python container exposing POST/DELETE endpoints
|
||||
that wrap the direct-ClickHouse-insert helpers (currently just traces;
|
||||
logs + metrics to follow). Frontend tests call these endpoints to seed
|
||||
telemetry with fine-grained per-test control.
|
||||
"""
|
||||
|
||||
def create() -> types.TestContainerDocker:
|
||||
# docker-py wants `dockerfile` RELATIVE to `path`. The fixture file
|
||||
# lives at tests/fixtures/seeder.py so the build context root is
|
||||
# tests/ (one parent up), and the Dockerfile path inside that
|
||||
# context is Dockerfile.seeder.
|
||||
docker_client = docker.from_env()
|
||||
docker_client.images.build(
|
||||
path=str(_TESTS_ROOT),
|
||||
dockerfile="Dockerfile.seeder",
|
||||
tag="signoz-tests-seeder:latest",
|
||||
rm=True,
|
||||
)
|
||||
|
||||
container = DockerContainer("signoz-tests-seeder:latest")
|
||||
container.with_env(
|
||||
"CH_HOST", clickhouse.container.container_configs["8123"].address
|
||||
)
|
||||
container.with_env(
|
||||
"CH_PORT", str(clickhouse.container.container_configs["8123"].port)
|
||||
)
|
||||
container.with_env(
|
||||
"CH_USER", clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_USERNAME"]
|
||||
)
|
||||
container.with_env(
|
||||
"CH_PASSWORD", clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_PASSWORD"]
|
||||
)
|
||||
container.with_env(
|
||||
"CH_CLUSTER", clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"]
|
||||
)
|
||||
container.with_exposed_ports(8080)
|
||||
container.with_network(network=network)
|
||||
container.start()
|
||||
|
||||
host = container.get_container_host_ip()
|
||||
host_port = container.get_exposed_port(8080)
|
||||
|
||||
for attempt in range(20):
|
||||
try:
|
||||
response = requests.get(f"http://{host}:{host_port}/healthz", timeout=2)
|
||||
if response.status_code == HTTPStatus.OK:
|
||||
break
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
logger.info("seeder attempt %d: %s", attempt + 1, e)
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise TimeoutError("seeder container did not become ready")
|
||||
|
||||
return types.TestContainerDocker(
|
||||
id=container.get_wrapped_container().id,
|
||||
host_configs={
|
||||
"8080": types.TestContainerUrlConfig("http", host, host_port),
|
||||
},
|
||||
container_configs={
|
||||
"8080": types.TestContainerUrlConfig(
|
||||
"http", container.get_wrapped_container().name, 8080
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
def delete(container: types.TestContainerDocker) -> None:
|
||||
client = docker.from_env()
|
||||
try:
|
||||
client.containers.get(container_id=container.id).stop()
|
||||
client.containers.get(container_id=container.id).remove(v=True)
|
||||
except docker.errors.NotFound:
|
||||
logger.info("Seeder container %s already gone", container.id)
|
||||
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"seeder",
|
||||
empty=lambda: types.TestContainerDocker(
|
||||
id="", host_configs={}, container_configs={}
|
||||
),
|
||||
create=create,
|
||||
delete=delete,
|
||||
restore=restore,
|
||||
)
|
||||
@@ -2,6 +2,7 @@ import platform
|
||||
import time
|
||||
from http import HTTPStatus
|
||||
from os import path
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import docker
|
||||
@@ -11,11 +12,16 @@ import requests
|
||||
from testcontainers.core.container import DockerContainer, Network
|
||||
from testcontainers.core.image import DockerImage
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
# Absolute path to the signoz repo root. Anchored to this file so the build
|
||||
# context resolves correctly regardless of pytest's cwd (tests/ vs
|
||||
# tests/integration/). fixtures/signoz.py -> fixtures/ -> tests/ -> repo root.
|
||||
_REPO_ROOT = Path(__file__).resolve().parents[2]
|
||||
|
||||
|
||||
def create_signoz(
|
||||
network: Network,
|
||||
@@ -50,7 +56,7 @@ def create_signoz(
|
||||
dockerfile_path = "cmd/enterprise/Dockerfile.with-web.integration"
|
||||
|
||||
self = DockerImage(
|
||||
path="../../",
|
||||
path=str(_REPO_ROOT),
|
||||
dockerfile_path=dockerfile_path,
|
||||
tag="signoz:integration",
|
||||
buildargs={
|
||||
@@ -181,7 +187,7 @@ def create_signoz(
|
||||
gateway=gateway,
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
cache_key,
|
||||
@@ -4,7 +4,7 @@ from typing import Any, Generator
|
||||
import pytest
|
||||
from sqlalchemy import create_engine, sql
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
|
||||
ConnectionTuple = namedtuple("ConnectionTuple", "connection config")
|
||||
|
||||
@@ -64,7 +64,7 @@ def sqlite(
|
||||
env=cache["env"],
|
||||
)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"sqlite",
|
||||
@@ -1,5 +1,4 @@
|
||||
import datetime
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
import isodate
|
||||
@@ -26,8 +25,3 @@ def parse_duration(duration: Any) -> datetime.timedelta:
|
||||
if isinstance(duration, datetime.timedelta):
|
||||
return duration
|
||||
return datetime.timedelta(seconds=duration)
|
||||
|
||||
|
||||
def get_testdata_file_path(file: str) -> str:
|
||||
testdata_dir = os.path.join(os.path.dirname(__file__), "..", "testdata")
|
||||
return os.path.join(testdata_dir, file)
|
||||
@@ -13,7 +13,7 @@ import pytest
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.fingerprint import LogsOrTracesFingerprint
|
||||
from fixtures.utils import parse_duration, parse_timestamp
|
||||
from fixtures.time import parse_duration, parse_timestamp
|
||||
|
||||
|
||||
class TracesKind(Enum):
|
||||
@@ -689,131 +689,144 @@ class Traces(ABC):
|
||||
return traces
|
||||
|
||||
|
||||
def insert_traces_to_clickhouse(conn, traces: List[Traces]) -> None:
|
||||
"""
|
||||
Insert traces into ClickHouse tables following the same logic as the Go exporter.
|
||||
Handles insertion into:
|
||||
- distributed_signoz_index_v3 (main traces table)
|
||||
- distributed_traces_v3_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_span_attributes_keys (attribute keys)
|
||||
- distributed_signoz_error_index_v2 (error events)
|
||||
|
||||
Pure function so the seeder container (tests/seeder/) can reuse the
|
||||
exact insert path used by the pytest fixtures. `conn` is a
|
||||
clickhouse-connect Client.
|
||||
"""
|
||||
resources: List[TracesResource] = []
|
||||
for trace in traces:
|
||||
resources.extend(trace.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_traces_v3_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
)
|
||||
|
||||
tag_attributes: List[TracesTagAttributes] = []
|
||||
for trace in traces:
|
||||
tag_attributes.extend(trace.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
resource_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
for trace in traces:
|
||||
attribute_keys.extend(trace.attribute_keys)
|
||||
resource_keys.extend(trace.resource_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_index_v3",
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_state",
|
||||
"parent_span_id",
|
||||
"flags",
|
||||
"name",
|
||||
"kind",
|
||||
"kind_string",
|
||||
"duration_nano",
|
||||
"status_code",
|
||||
"status_message",
|
||||
"status_code_string",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"events",
|
||||
"links",
|
||||
"response_status_code",
|
||||
"external_http_url",
|
||||
"http_url",
|
||||
"external_http_method",
|
||||
"http_method",
|
||||
"http_host",
|
||||
"db_name",
|
||||
"db_operation",
|
||||
"has_error",
|
||||
"is_remote",
|
||||
"resource",
|
||||
],
|
||||
data=[trace.np_arr() for trace in traces],
|
||||
)
|
||||
|
||||
error_events: List[TracesErrorEvent] = []
|
||||
for trace in traces:
|
||||
error_events.extend(trace.error_events)
|
||||
|
||||
if len(error_events) > 0:
|
||||
conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_error_index_v2",
|
||||
data=[error_event.np_arr() for error_event in error_events],
|
||||
)
|
||||
|
||||
|
||||
_TRACES_TABLES_TO_TRUNCATE = [
|
||||
"signoz_index_v3",
|
||||
"traces_v3_resource",
|
||||
"tag_attributes_v2",
|
||||
"span_attributes_keys",
|
||||
"signoz_error_index_v2",
|
||||
]
|
||||
|
||||
|
||||
def truncate_traces_tables(conn, cluster: str) -> None:
|
||||
"""Truncate all traces tables. Used by the pytest fixture teardown and by
|
||||
the seeder's DELETE /telemetry/traces endpoint."""
|
||||
for table in _TRACES_TABLES_TO_TRUNCATE:
|
||||
conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.{table} ON CLUSTER '{cluster}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_traces", scope="function")
|
||||
def insert_traces(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
) -> Generator[Callable[[List[Traces]], None], Any, None]:
|
||||
def _insert_traces(traces: List[Traces]) -> None:
|
||||
"""
|
||||
Insert traces into ClickHouse tables following the same logic as the Go exporter.
|
||||
This function handles insertion into multiple tables:
|
||||
- distributed_signoz_index_v3 (main traces table)
|
||||
- distributed_traces_v3_resource (resource fingerprints)
|
||||
- distributed_tag_attributes_v2 (tag attributes)
|
||||
- distributed_span_attributes_keys (attribute keys)
|
||||
- distributed_signoz_error_index_v2 (error events)
|
||||
"""
|
||||
resources: List[TracesResource] = []
|
||||
for trace in traces:
|
||||
resources.extend(trace.resource)
|
||||
|
||||
if len(resources) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_traces_v3_resource",
|
||||
data=[resource.np_arr() for resource in resources],
|
||||
)
|
||||
|
||||
tag_attributes: List[TracesTagAttributes] = []
|
||||
for trace in traces:
|
||||
tag_attributes.extend(trace.tag_attributes)
|
||||
|
||||
if len(tag_attributes) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_tag_attributes_v2",
|
||||
data=[tag_attribute.np_arr() for tag_attribute in tag_attributes],
|
||||
)
|
||||
|
||||
attribute_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
resource_keys: List[TracesResourceOrAttributeKeys] = []
|
||||
for trace in traces:
|
||||
attribute_keys.extend(trace.attribute_keys)
|
||||
resource_keys.extend(trace.resource_keys)
|
||||
|
||||
if len(attribute_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[attribute_key.np_arr() for attribute_key in attribute_keys],
|
||||
)
|
||||
|
||||
if len(resource_keys) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_span_attributes_keys",
|
||||
data=[resource_key.np_arr() for resource_key in resource_keys],
|
||||
)
|
||||
|
||||
# Insert main traces
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_index_v3",
|
||||
column_names=[
|
||||
"ts_bucket_start",
|
||||
"resource_fingerprint",
|
||||
"timestamp",
|
||||
"trace_id",
|
||||
"span_id",
|
||||
"trace_state",
|
||||
"parent_span_id",
|
||||
"flags",
|
||||
"name",
|
||||
"kind",
|
||||
"kind_string",
|
||||
"duration_nano",
|
||||
"status_code",
|
||||
"status_message",
|
||||
"status_code_string",
|
||||
"attributes_string",
|
||||
"attributes_number",
|
||||
"attributes_bool",
|
||||
"resources_string",
|
||||
"events",
|
||||
"links",
|
||||
"response_status_code",
|
||||
"external_http_url",
|
||||
"http_url",
|
||||
"external_http_method",
|
||||
"http_method",
|
||||
"http_host",
|
||||
"db_name",
|
||||
"db_operation",
|
||||
"has_error",
|
||||
"is_remote",
|
||||
"resource",
|
||||
],
|
||||
data=[trace.np_arr() for trace in traces],
|
||||
)
|
||||
|
||||
# Insert error events
|
||||
error_events: List[TracesErrorEvent] = []
|
||||
for trace in traces:
|
||||
error_events.extend(trace.error_events)
|
||||
|
||||
if len(error_events) > 0:
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_traces",
|
||||
table="distributed_signoz_error_index_v2",
|
||||
data=[error_event.np_arr() for error_event in error_events],
|
||||
)
|
||||
insert_traces_to_clickhouse(clickhouse.conn, traces)
|
||||
|
||||
yield _insert_traces
|
||||
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.signoz_index_v3 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.traces_v3_resource ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.tag_attributes_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.span_attributes_keys ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_traces.signoz_error_index_v2 ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
truncate_traces_tables(
|
||||
clickhouse.conn,
|
||||
clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"],
|
||||
)
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import docker.errors
|
||||
import pytest
|
||||
from testcontainers.core.container import DockerContainer, Network
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures import reuse, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -58,7 +58,7 @@ def zookeeper(
|
||||
def restore(cache: dict) -> types.TestContainerDocker:
|
||||
return types.TestContainerDocker.from_cache(cache)
|
||||
|
||||
return dev.wrap(
|
||||
return reuse.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"zookeeper",
|
||||
@@ -1,110 +0,0 @@
|
||||
from datetime import datetime, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, List
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.logger import setup_logger
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.metrics import Metrics
|
||||
from fixtures.traces import Traces
|
||||
from fixtures.utils import get_testdata_file_path
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(name="create_alert_rule", scope="function")
|
||||
def create_alert_rule(
|
||||
signoz: types.SigNoz, get_token: Callable[[str, str], str]
|
||||
) -> Callable[[dict], str]:
|
||||
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
rule_ids = []
|
||||
|
||||
def _create_alert_rule(rule_data: dict) -> str:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/rules"),
|
||||
json=rule_data,
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert (
|
||||
response.status_code == HTTPStatus.OK
|
||||
), f"Failed to create rule, api returned {response.status_code} with response: {response.text}"
|
||||
rule_id = response.json()["data"]["id"]
|
||||
rule_ids.append(rule_id)
|
||||
return rule_id
|
||||
|
||||
def _delete_alert_rule(rule_id: str):
|
||||
logger.info("Deleting rule: %s", {"rule_id": rule_id})
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(f"/api/v1/rules/{rule_id}"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
if response.status_code != HTTPStatus.OK:
|
||||
raise Exception( # pylint: disable=broad-exception-raised
|
||||
f"Failed to delete rule, api returned {response.status_code} with response: {response.text}"
|
||||
)
|
||||
|
||||
yield _create_alert_rule
|
||||
# delete the rule on cleanup
|
||||
for rule_id in rule_ids:
|
||||
try:
|
||||
_delete_alert_rule(rule_id)
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
logger.error("Error deleting rule: %s", {"rule_id": rule_id, "error": e})
|
||||
|
||||
|
||||
@pytest.fixture(name="insert_alert_data", scope="function")
|
||||
def insert_alert_data(
|
||||
insert_metrics: Callable[[List[Metrics]], None],
|
||||
insert_traces: Callable[[List[Traces]], None],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> Callable[[List[types.AlertData]], None]:
|
||||
|
||||
def _insert_alert_data(
|
||||
alert_data_items: List[types.AlertData],
|
||||
base_time: datetime = None,
|
||||
) -> None:
|
||||
|
||||
metrics: List[Metrics] = []
|
||||
traces: List[Traces] = []
|
||||
logs: List[Logs] = []
|
||||
|
||||
now = base_time or datetime.now(tz=timezone.utc).replace(
|
||||
second=0, microsecond=0
|
||||
)
|
||||
|
||||
for data_item in alert_data_items:
|
||||
if data_item.type == "metrics":
|
||||
_metrics = Metrics.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
metrics.extend(_metrics)
|
||||
elif data_item.type == "traces":
|
||||
_traces = Traces.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
traces.extend(_traces)
|
||||
elif data_item.type == "logs":
|
||||
_logs = Logs.load_from_file(
|
||||
get_testdata_file_path(data_item.data_path),
|
||||
base_time=now,
|
||||
)
|
||||
logs.extend(_logs)
|
||||
|
||||
# Add data to ClickHouse if any data is present
|
||||
if len(metrics) > 0:
|
||||
insert_metrics(metrics)
|
||||
if len(traces) > 0:
|
||||
insert_traces(traces)
|
||||
if len(logs) > 0:
|
||||
insert_logs(logs)
|
||||
|
||||
yield _insert_alert_data
|
||||
@@ -1,216 +0,0 @@
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, List, Tuple
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
from wiremock.resources.mappings import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import dev, types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
USER_ADMIN_NAME = "admin"
|
||||
USER_ADMIN_EMAIL = "admin@integration.test"
|
||||
USER_ADMIN_PASSWORD = "password123Z$"
|
||||
|
||||
USER_EDITOR_NAME = "editor"
|
||||
USER_EDITOR_EMAIL = "editor@integration.test"
|
||||
USER_EDITOR_PASSWORD = "password123Z$"
|
||||
|
||||
USER_VIEWER_NAME = "viewer"
|
||||
USER_VIEWER_EMAIL = "viewer@integration.test"
|
||||
USER_VIEWER_PASSWORD = "password123Z$"
|
||||
|
||||
|
||||
@pytest.fixture(name="create_user_admin", scope="package")
|
||||
def create_user_admin(
|
||||
signoz: types.SigNoz, request: pytest.FixtureRequest, pytestconfig: pytest.Config
|
||||
) -> types.Operation:
|
||||
def create() -> None:
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/register"),
|
||||
json={
|
||||
"name": USER_ADMIN_NAME,
|
||||
"orgName": "",
|
||||
"email": USER_ADMIN_EMAIL,
|
||||
"password": USER_ADMIN_PASSWORD,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
|
||||
return types.Operation(name="create_user_admin")
|
||||
|
||||
def delete(_: types.Operation) -> None:
|
||||
pass
|
||||
|
||||
def restore(cache: dict) -> types.Operation:
|
||||
return types.Operation(name=cache["name"])
|
||||
|
||||
return dev.wrap(
|
||||
request,
|
||||
pytestconfig,
|
||||
"create_user_admin",
|
||||
lambda: types.Operation(name=""),
|
||||
create,
|
||||
delete,
|
||||
restore,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(name="get_session_context", scope="function")
|
||||
def get_session_context(signoz: types.SigNoz) -> Callable[[str, str], str]:
|
||||
def _get_session_context(email: str) -> str:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": email,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
return response.json()["data"]
|
||||
|
||||
return _get_session_context
|
||||
|
||||
|
||||
@pytest.fixture(name="get_token", scope="function")
|
||||
def get_token(signoz: types.SigNoz) -> Callable[[str, str], str]:
|
||||
def _get_token(email: str, password: str) -> str:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": email,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
org_id = response.json()["data"]["orgs"][0]["id"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/email_password"),
|
||||
json={
|
||||
"email": email,
|
||||
"password": password,
|
||||
"orgId": org_id,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
return response.json()["data"]["accessToken"]
|
||||
|
||||
return _get_token
|
||||
|
||||
|
||||
@pytest.fixture(name="get_tokens", scope="function")
|
||||
def get_tokens(signoz: types.SigNoz) -> Callable[[str, str], Tuple[str, str]]:
|
||||
def _get_tokens(email: str, password: str) -> str:
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/context"),
|
||||
params={
|
||||
"email": email,
|
||||
"ref": f"{signoz.self.host_configs['8080'].base()}",
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
org_id = response.json()["data"]["orgs"][0]["id"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v2/sessions/email_password"),
|
||||
json={
|
||||
"email": email,
|
||||
"password": password,
|
||||
"orgId": org_id,
|
||||
},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
access_token = response.json()["data"]["accessToken"]
|
||||
refresh_token = response.json()["data"]["refreshToken"]
|
||||
return access_token, refresh_token
|
||||
|
||||
return _get_tokens
|
||||
|
||||
|
||||
# This is not a fixture purposefully, we just want to add a license to the signoz instance.
|
||||
# This is also idempotent in nature.
|
||||
def add_license(
|
||||
signoz: types.SigNoz,
|
||||
make_http_mocks: Callable[[types.TestContainerDocker, List[Mapping]], None],
|
||||
get_token: Callable[[str, str], str], # pylint: disable=redefined-outer-name
|
||||
) -> None:
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/licenses/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"id": "0196360e-90cd-7a74-8313-1aa815ce2a67",
|
||||
"key": "secret-key",
|
||||
"valid_from": 1732146923,
|
||||
"valid_until": -1,
|
||||
"status": "VALID",
|
||||
"state": "EVALUATING",
|
||||
"plan": {
|
||||
"name": "ENTERPRISE",
|
||||
},
|
||||
"platform": "CLOUD",
|
||||
"features": [],
|
||||
"event_queue": {},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
|
||||
access_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
response = requests.post(
|
||||
url=signoz.self.host_configs["8080"].get("/api/v3/licenses"),
|
||||
json={"key": "secret-key"},
|
||||
headers={"Authorization": "Bearer " + access_token},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
if response.status_code == HTTPStatus.CONFLICT:
|
||||
return
|
||||
|
||||
assert response.status_code == HTTPStatus.ACCEPTED
|
||||
|
||||
response = requests.post(
|
||||
url=signoz.zeus.host_configs["8080"].get("/__admin/requests/count"),
|
||||
json={"method": "GET", "url": "/v2/licenses/me"},
|
||||
timeout=5,
|
||||
)
|
||||
|
||||
assert response.json()["count"] == 1
|
||||
@@ -1,115 +0,0 @@
|
||||
"""Reusable helpers for user API tests."""
|
||||
|
||||
from http import HTTPStatus
|
||||
from typing import Dict
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
|
||||
USERS_BASE = "/api/v2/users"
|
||||
|
||||
|
||||
def create_active_user(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
email: str,
|
||||
role: str,
|
||||
password: str,
|
||||
name: str = "",
|
||||
) -> str:
|
||||
"""Invite a user and activate via resetPassword. Returns user ID."""
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/invite"),
|
||||
json={"email": email, "role": role, "name": name},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.CREATED, response.text
|
||||
invited_user = response.json()["data"]
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get("/api/v1/resetPassword"),
|
||||
json={"password": password, "token": invited_user["token"]},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
return invited_user["id"]
|
||||
|
||||
|
||||
def find_user_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email from the user list. Raises AssertionError if not found."""
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(USERS_BASE),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
user = next((u for u in response.json()["data"] if u["email"] == email), None)
|
||||
assert user is not None, f"User with email '{email}' not found"
|
||||
return user
|
||||
|
||||
|
||||
def find_user_with_roles_by_email(signoz: types.SigNoz, token: str, email: str) -> Dict:
|
||||
"""Find a user by email and return UserWithRoles (user fields + userRoles).
|
||||
|
||||
Raises AssertionError if the user is not found.
|
||||
"""
|
||||
user = find_user_by_email(signoz, token, email)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user['id']}"),
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
return response.json()["data"]
|
||||
|
||||
|
||||
def assert_user_has_role(data: Dict, role_name: str) -> None:
|
||||
"""Assert that a UserWithRoles response contains the expected managed role."""
|
||||
role_names = {ur["role"]["name"] for ur in data.get("userRoles", [])}
|
||||
assert role_name in role_names, f"Expected role '{role_name}' in {role_names}"
|
||||
|
||||
|
||||
def change_user_role(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
user_id: str,
|
||||
old_role: str,
|
||||
new_role: str,
|
||||
) -> None:
|
||||
"""Change a user's role (remove old, assign new).
|
||||
|
||||
Role names should be managed role names (e.g. signoz-editor).
|
||||
"""
|
||||
# Get current roles to find the old role's ID
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
roles = response.json()["data"]
|
||||
|
||||
old_role_entry = next((r for r in roles if r["name"] == old_role), None)
|
||||
assert old_role_entry is not None, f"User does not have role '{old_role}'"
|
||||
|
||||
# Remove old role
|
||||
response = requests.delete(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"{USERS_BASE}/{user_id}/roles/{old_role_entry['id']}"
|
||||
),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
|
||||
|
||||
# Assign new role
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(f"{USERS_BASE}/{user_id}/roles"),
|
||||
json={"name": new_role},
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
timeout=5,
|
||||
)
|
||||
assert response.status_code == HTTPStatus.OK, response.text
|
||||
@@ -1,154 +0,0 @@
|
||||
"""Fixtures for cloud integration tests."""
|
||||
|
||||
from typing import Callable
|
||||
|
||||
import requests
|
||||
from wiremock.client import (
|
||||
HttpMethods,
|
||||
Mapping,
|
||||
MappingRequest,
|
||||
MappingResponse,
|
||||
WireMockMatchers,
|
||||
)
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
|
||||
|
||||
def deprecated_simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/agent-check-in"
|
||||
|
||||
checkin_payload = {
|
||||
"account_id": account_id,
|
||||
"cloud_account_id": cloud_account_id,
|
||||
"data": {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
def setup_create_account_mocks(
|
||||
signoz: types.SigNoz,
|
||||
make_http_mocks: Callable,
|
||||
) -> None:
|
||||
"""Set up Zeus and Gateway mocks required by the CreateAccount endpoint."""
|
||||
make_http_mocks(
|
||||
signoz.zeus,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v2/deployments/me",
|
||||
headers={
|
||||
"X-Signoz-Cloud-Api-Key": {
|
||||
WireMockMatchers.EQUAL_TO: "secret-key"
|
||||
}
|
||||
},
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "test-deployment",
|
||||
"cluster": {"region": {"dns": "test.signoz.cloud"}},
|
||||
},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
)
|
||||
],
|
||||
)
|
||||
make_http_mocks(
|
||||
signoz.gateway,
|
||||
[
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.GET,
|
||||
url="/v1/workspaces/me/keys/search?name=aws-integration&page=1&per_page=10",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": [],
|
||||
"_pagination": {"page": 1, "per_page": 10, "total": 0},
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
Mapping(
|
||||
request=MappingRequest(
|
||||
method=HttpMethods.POST,
|
||||
url="/v1/workspaces/me/keys",
|
||||
),
|
||||
response=MappingResponse(
|
||||
status=200,
|
||||
json_body={
|
||||
"status": "success",
|
||||
"data": {
|
||||
"name": "aws-integration",
|
||||
"value": "test-ingestion-key-123456",
|
||||
},
|
||||
"error": "",
|
||||
},
|
||||
),
|
||||
persistent=False,
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
def simulate_agent_checkin(
|
||||
signoz: types.SigNoz,
|
||||
admin_token: str,
|
||||
cloud_provider: str,
|
||||
account_id: str,
|
||||
cloud_account_id: str,
|
||||
data: dict | None = None,
|
||||
) -> requests.Response:
|
||||
endpoint = f"/api/v1/cloud_integrations/{cloud_provider}/accounts/check_in"
|
||||
|
||||
checkin_payload = {
|
||||
"cloudIntegrationId": account_id,
|
||||
"providerAccountId": cloud_account_id,
|
||||
"data": data or {},
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
signoz.self.host_configs["8080"].get(endpoint),
|
||||
headers={"Authorization": f"Bearer {admin_token}"},
|
||||
json=checkin_payload,
|
||||
timeout=10,
|
||||
)
|
||||
|
||||
if not response.ok:
|
||||
logger.error(
|
||||
"Agent check-in failed: %s, response: %s",
|
||||
response.status_code,
|
||||
response.text,
|
||||
)
|
||||
|
||||
return response
|
||||
@@ -1,256 +0,0 @@
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import List
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures.logs import Logs
|
||||
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
|
||||
|
||||
|
||||
def format_timestamp(dt: datetime) -> str:
|
||||
"""
|
||||
Format a datetime object to match the API's timestamp format.
|
||||
The API returns timestamps with minimal fractional seconds precision.
|
||||
Example: 2026-02-03T20:54:56.5Z for 500000 microseconds
|
||||
"""
|
||||
base_str = dt.strftime("%Y-%m-%dT%H:%M:%S")
|
||||
if dt.microsecond:
|
||||
# Convert microseconds to fractional seconds and strip trailing zeros
|
||||
fractional = f"{dt.microsecond / 1000000:.6f}"[2:].rstrip("0")
|
||||
return f"{base_str}.{fractional}Z"
|
||||
return f"{base_str}Z"
|
||||
|
||||
|
||||
def assert_identical_query_response(
|
||||
response1: requests.Response, response2: requests.Response
|
||||
) -> None:
|
||||
"""
|
||||
Assert that two query responses are identical in status and data.
|
||||
"""
|
||||
assert response1.status_code == response2.status_code, "Status codes do not match"
|
||||
if response1.status_code == HTTPStatus.OK:
|
||||
assert (
|
||||
response1.json()["status"] == response2.json()["status"]
|
||||
), "Response statuses do not match"
|
||||
assert (
|
||||
response1.json()["data"]["data"]["results"]
|
||||
== response2.json()["data"]["data"]["results"]
|
||||
), "Response data do not match"
|
||||
|
||||
|
||||
def generate_logs_with_corrupt_metadata() -> List[Logs]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'severity_text', 'severity_number' and 'body' fields in metadata
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
body="POST /integration request received",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"severity_text": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
trace_id="1",
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
body="SELECT query executed",
|
||||
severity_text="DEBUG",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"severity_number": "corrupt_data",
|
||||
"id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_id": "2",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=2),
|
||||
body="HTTP PATCH failed with 404",
|
||||
severity_text="WARN",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"body": "corrupt_data",
|
||||
"trace_id": "3",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
body="{'trace_id': '4'}",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"body": "corrupt_data",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def generate_traces_with_corrupt_metadata() -> List[Traces]:
|
||||
"""
|
||||
Specifically, entries with 'id', 'timestamp', 'trace_id' and 'duration_nano' fields in metadata
|
||||
"""
|
||||
http_service_trace_id = TraceIdGenerator.trace_id()
|
||||
http_service_span_id = TraceIdGenerator.span_id()
|
||||
http_service_db_span_id = TraceIdGenerator.span_id()
|
||||
http_service_patch_span_id = TraceIdGenerator.span_id()
|
||||
topic_service_trace_id = TraceIdGenerator.trace_id()
|
||||
topic_service_span_id = TraceIdGenerator.span_id()
|
||||
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
return [
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=4),
|
||||
duration=timedelta(seconds=3),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_span_id,
|
||||
parent_span_id="",
|
||||
name="POST /integration",
|
||||
kind=TracesKind.SPAN_KIND_SERVER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"trace_id": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"net.transport": "IP.TCP",
|
||||
"http.scheme": "http",
|
||||
"http.user_agent": "Integration Test",
|
||||
"http.request.method": "POST",
|
||||
"http.response.status_code": "200",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3.5),
|
||||
duration=timedelta(seconds=5),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_db_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="SELECT",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"timestamp": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"db.name": "integration",
|
||||
"db.operation": "SELECT",
|
||||
"db.statement": "SELECT * FROM integration",
|
||||
"trace_d": "corrupt_data",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=3),
|
||||
duration=timedelta(seconds=1),
|
||||
trace_id=http_service_trace_id,
|
||||
span_id=http_service_patch_span_id,
|
||||
parent_span_id=http_service_span_id,
|
||||
name="HTTP PATCH",
|
||||
kind=TracesKind.SPAN_KIND_CLIENT,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "http-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-000",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "000",
|
||||
"duration_nano": "corrupt_data",
|
||||
},
|
||||
attributes={
|
||||
"http.request.method": "PATCH",
|
||||
"http.status_code": "404",
|
||||
"id": "1",
|
||||
},
|
||||
),
|
||||
Traces(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
duration=timedelta(seconds=4),
|
||||
trace_id=topic_service_trace_id,
|
||||
span_id=topic_service_span_id,
|
||||
parent_span_id="",
|
||||
name="topic publish",
|
||||
kind=TracesKind.SPAN_KIND_PRODUCER,
|
||||
status_code=TracesStatusCode.STATUS_CODE_OK,
|
||||
status_message="",
|
||||
resources={
|
||||
"deployment.environment": "production",
|
||||
"service.name": "topic-service",
|
||||
"os.type": "linux",
|
||||
"host.name": "linux-001",
|
||||
"cloud.provider": "integration",
|
||||
"cloud.account.id": "001",
|
||||
},
|
||||
attributes={
|
||||
"message.type": "SENT",
|
||||
"messaging.operation": "publish",
|
||||
"messaging.message.id": "001",
|
||||
"duration_nano": "corrupt_data",
|
||||
"id": 1,
|
||||
},
|
||||
),
|
||||
]
|
||||
@@ -7,12 +7,12 @@ import pytest
|
||||
from wiremock.client import HttpMethods, Mapping, MappingRequest, MappingResponse
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.alertutils import (
|
||||
from fixtures.alerts import (
|
||||
update_rule_channel_name,
|
||||
verify_webhook_alert_expectation,
|
||||
)
|
||||
from fixtures.fs import get_testdata_file_path
|
||||
from fixtures.logger import setup_logger
|
||||
from fixtures.utils import get_testdata_file_path
|
||||
|
||||
# Alert test cases use a 30-second wait time to verify expected alert firing.
|
||||
# Alert data is set up to trigger on the first rule manager evaluation.
|
||||
@@ -11,12 +11,10 @@ from fixtures.auth import (
|
||||
USER_ADMIN_EMAIL,
|
||||
USER_ADMIN_PASSWORD,
|
||||
add_license,
|
||||
)
|
||||
from fixtures.authutils import (
|
||||
assert_user_has_role,
|
||||
find_user_with_roles_by_email,
|
||||
)
|
||||
from fixtures.idputils import (
|
||||
from fixtures.idp import (
|
||||
get_saml_domain,
|
||||
perform_saml_login,
|
||||
)
|
||||
@@ -10,12 +10,10 @@ from fixtures.auth import (
|
||||
USER_ADMIN_EMAIL,
|
||||
USER_ADMIN_PASSWORD,
|
||||
add_license,
|
||||
)
|
||||
from fixtures.authutils import (
|
||||
assert_user_has_role,
|
||||
find_user_with_roles_by_email,
|
||||
)
|
||||
from fixtures.idputils import (
|
||||
from fixtures.idp import (
|
||||
get_oidc_domain,
|
||||
perform_oidc_login,
|
||||
)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.cloudintegrationsutils import deprecated_simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import deprecated_simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.cloudintegrationsutils import deprecated_simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import deprecated_simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.cloudintegrationsutils import deprecated_simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import deprecated_simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -11,7 +11,7 @@ from wiremock.client import (
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import setup_create_account_mocks
|
||||
from fixtures.cloudintegrations import setup_create_account_mocks
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -4,7 +4,7 @@ from typing import Callable
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -6,7 +6,7 @@ import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.cloudintegrationsutils import simulate_agent_checkin
|
||||
from fixtures.cloudintegrations import simulate_agent_checkin
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -11,7 +11,7 @@ from wiremock.client import (
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
|
||||
from fixtures.gatewayutils import (
|
||||
from fixtures.gateway import (
|
||||
TEST_KEY_ID,
|
||||
common_gateway_headers,
|
||||
get_gateway_requests,
|
||||
@@ -11,7 +11,7 @@ from wiremock.client import (
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import add_license
|
||||
from fixtures.gatewayutils import (
|
||||
from fixtures.gateway import (
|
||||
TEST_KEY_ID,
|
||||
TEST_LIMIT_ID,
|
||||
common_gateway_headers,
|
||||
@@ -11,8 +11,6 @@ from fixtures.auth import (
|
||||
USER_EDITOR_NAME,
|
||||
USER_EDITOR_PASSWORD,
|
||||
USER_VIEWER_EMAIL,
|
||||
)
|
||||
from fixtures.authutils import (
|
||||
assert_user_has_role,
|
||||
find_user_with_roles_by_email,
|
||||
)
|
||||
@@ -5,8 +5,7 @@ import requests
|
||||
from sqlalchemy import sql
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.authutils import find_user_by_email
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, find_user_by_email
|
||||
from fixtures.logger import setup_logger
|
||||
|
||||
logger = setup_logger(__name__)
|
||||
@@ -9,8 +9,6 @@ from fixtures.auth import (
|
||||
USER_ADMIN_PASSWORD,
|
||||
USER_EDITOR_EMAIL,
|
||||
USER_EDITOR_PASSWORD,
|
||||
)
|
||||
from fixtures.authutils import (
|
||||
change_user_role,
|
||||
create_active_user,
|
||||
)
|
||||
@@ -3,8 +3,7 @@ from typing import Callable
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.authutils import create_active_user
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, create_active_user
|
||||
from fixtures.types import SigNoz
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user