Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Vinícius Lourenço | f054121477 | feat(pnpm): migrate away from yarn | 2026-03-11 23:52:34 -03:00 |
55 changed files with 24217 additions and 4002 deletions

View File

@@ -77,6 +77,10 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v4
with:
version: 10
- name: docker-community
shell: bash
run: |
@@ -106,9 +110,13 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v4
with:
version: 10
- name: install-frontend
run: cd frontend && yarn install
run: cd frontend && pnpm install
- name: generate-api-clients
run: |
cd frontend && yarn generate:api
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)
cd frontend && pnpm generate:api
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run pnpm generate:api in frontend/ locally and commit."; exit 1)

View File

@@ -25,6 +25,10 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v4
with:
version: 10
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact

View File

@@ -41,6 +41,10 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v4
with:
version: 10
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact

View File

@@ -78,10 +78,15 @@ jobs:
with:
node-version: "22"
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10
- name: Install frontend dependencies
working-directory: ./frontend
run: |
yarn install
pnpm install
- name: Install uv
uses: astral-sh/setup-uv@v5
@@ -98,7 +103,7 @@ jobs:
- name: Generate permissions.type.ts
working-directory: ./frontend
run: |
yarn generate:permissions-type
pnpm generate:permissions-type
- name: Teardown test environment
if: always()
@@ -108,6 +113,6 @@ jobs:
- name: Check for changes
run: |
if ! git diff --exit-code frontend/src/hooks/useAuthZ/permissions.type.ts; then
echo "::error::frontend/src/hooks/useAuthZ/permissions.type.ts is out of date. Please run the generator locally and commit the changes: npm run generate:permissions-type (from the frontend directory)"
echo "::error::frontend/src/hooks/useAuthZ/permissions.type.ts is out of date. Please run the generator locally and commit the changes: pnpm generate:permissions-type (from the frontend directory)"
exit 1
fi

View File

@@ -27,6 +27,11 @@ jobs:
with:
node-version: lts/*
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 10
- name: Mask secrets and input
run: |
echo "::add-mask::${{ secrets.BASE_URL }}"
@@ -37,12 +42,11 @@ jobs:
- name: Install dependencies
working-directory: frontend
run: |
npm install -g yarn
yarn
pnpm install
- name: Install Playwright Browsers
working-directory: frontend
run: yarn playwright install --with-deps
run: pnpm playwright install --with-deps
- name: Run Playwright Tests
working-directory: frontend
@@ -51,7 +55,7 @@ jobs:
LOGIN_USERNAME="${{ secrets.LOGIN_USERNAME }}" \
LOGIN_PASSWORD="${{ secrets.LOGIN_PASSWORD }}" \
USER_ROLE="${{ github.event.inputs.userRole }}" \
yarn playwright test
pnpm playwright test
- name: Upload Playwright Report
uses: actions/upload-artifact@v4

View File

@@ -1,19 +1,21 @@
# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
# and commit this file to your remote git repository to share the goodness with others.
tasks:
- name: Run Docker Images
init: |
cd ./deploy/docker
sudo docker compose up -d
- name: Install pnpm
init: |
npm i -g pnpm
- name: Run Frontend
init: |
cd ./frontend
yarn install
command:
yarn dev
pnpm install
command: pnpm dev
ports:
- port: 8080

View File

@@ -154,7 +154,7 @@ $(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
.PHONY: js-build
js-build: ## Builds the js frontend
@echo ">> building js frontend"
@cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build
@cd $(JS_BUILD_CONTEXT) && CI=1 pnpm install && pnpm build
##############################################################
# docker commands

View File

@@ -3,8 +3,9 @@ FROM node:22-bookworm AS build
WORKDIR /opt/
COPY ./frontend/ ./
ENV NODE_OPTIONS=--max-old-space-size=8192
RUN CI=1 yarn install
RUN CI=1 yarn build
RUN CI=1 npm i -g pnpm
RUN CI=1 pnpm install
RUN CI=1 pnpm build
FROM golang:1.25-bookworm

View File

@@ -13,13 +13,12 @@ Before diving in, make sure you have these tools installed:
- Download from [go.dev/dl](https://go.dev/dl/)
- Check [go.mod](../../go.mod#L3) for the minimum version
- **Node** - Powers our frontend
- Download from [nodejs.org](https://nodejs.org)
- Check [.nvmrc](../../frontend/.nvmrc) for the version
- **Yarn** - Our frontend package manager
- Follow the [installation guide](https://yarnpkg.com/getting-started/install)
- **Pnpm** - Our frontend package manager
- Follow the [installation guide](https://pnpm.io/installation)
- **Docker** - For running Clickhouse and Postgres locally
- Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
@@ -95,7 +94,7 @@ This command:
2. Install dependencies:
```bash
yarn install
pnpm install
```
3. Create a `.env` file in this directory:
@@ -105,10 +104,10 @@ This command:
4. Start the development server:
```bash
yarn dev
pnpm dev
```
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
> 💡 **Tip**: `pnpm dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉

View File

@@ -18,40 +18,40 @@ The configuration file is a JSON array containing data source objects. Each obje
### Required Keys
| Key | Type | Description |
|-----|------|-------------|
| `dataSource` | `string` | Unique identifier for the data source (kebab-case, e.g., `"aws-ec2"`) |
| `label` | `string` | Display name shown to users (e.g., `"AWS EC2"`) |
| `tags` | `string[]` | Array of category tags for grouping (e.g., `["AWS"]`, `["database"]`) |
| `module` | `string` | Destination module after onboarding completion |
| `imgUrl` | `string` | Path to the logo/icon **(SVG required)** (e.g., `"/Logos/ec2.svg"`) |
| Key | Type | Description |
| ------------ | ---------- | --------------------------------------------------------------------- |
| `dataSource` | `string` | Unique identifier for the data source (kebab-case, e.g., `"aws-ec2"`) |
| `label` | `string` | Display name shown to users (e.g., `"AWS EC2"`) |
| `tags` | `string[]` | Array of category tags for grouping (e.g., `["AWS"]`, `["database"]`) |
| `module` | `string` | Destination module after onboarding completion |
| `imgUrl` | `string` | Path to the logo/icon **(SVG required)** (e.g., `"/Logos/ec2.svg"`) |
### Optional Keys
| Key | Type | Description |
|-----|------|-------------|
| `link` | `string` | Docs link to redirect to (e.g., `"/docs/aws-monitoring/ec2/"`) |
| `relatedSearchKeywords` | `string[]` | Array of keywords for search functionality |
| `question` | `object` | Nested question object for multi-step flows |
| `internalRedirect` | `boolean` | When `true`, navigates within the app instead of showing docs |
| Key | Type | Description |
| ----------------------- | ---------- | -------------------------------------------------------------- |
| `link` | `string` | Docs link to redirect to (e.g., `"/docs/aws-monitoring/ec2/"`) |
| `relatedSearchKeywords` | `string[]` | Array of keywords for search functionality |
| `question` | `object` | Nested question object for multi-step flows |
| `internalRedirect` | `boolean` | When `true`, navigates within the app instead of showing docs |
## Module Values
The `module` key determines where users are redirected after completing onboarding:
| Value | Destination |
|-------|-------------|
| `apm` | APM / Traces |
| `logs` | Logs Explorer |
| `metrics` | Metrics Explorer |
| `dashboards` | Dashboards |
| `infra-monitoring-hosts` | Infrastructure Monitoring - Hosts |
| `infra-monitoring-k8s` | Infrastructure Monitoring - Kubernetes |
| `messaging-queues-kafka` | Messaging Queues - Kafka |
| `messaging-queues-celery` | Messaging Queues - Celery |
| `integrations` | Integrations page |
| `home` | Home page |
| `api-monitoring` | API Monitoring |
| Value | Destination |
| ------------------------- | -------------------------------------- |
| `apm` | APM / Traces |
| `logs` | Logs Explorer |
| `metrics` | Metrics Explorer |
| `dashboards` | Dashboards |
| `infra-monitoring-hosts` | Infrastructure Monitoring - Hosts |
| `infra-monitoring-k8s` | Infrastructure Monitoring - Kubernetes |
| `messaging-queues-kafka` | Messaging Queues - Kafka |
| `messaging-queues-celery` | Messaging Queues - Celery |
| `integrations` | Integrations page |
| `home` | Home page |
| `api-monitoring` | API Monitoring |
## Question Object Structure
@@ -91,14 +91,14 @@ The `question` object enables multi-step selection flows:
### Question Keys
| Key | Type | Description |
|-----|------|-------------|
| `desc` | `string` | Question text displayed to the user |
| `type` | `string` | Currently only `"select"` is supported |
| `helpText` | `string` | (Optional) Additional help text below the question |
| `helpLink` | `string` | (Optional) Docs link for the help section |
| Key | Type | Description |
| -------------- | -------- | ----------------------------------------------------------- |
| `desc` | `string` | Question text displayed to the user |
| `type` | `string` | Currently only `"select"` is supported |
| `helpText` | `string` | (Optional) Additional help text below the question |
| `helpLink` | `string` | (Optional) Docs link for the help section |
| `helpLinkText` | `string` | (Optional) Text for the help link (default: "Learn more →") |
| `options` | `array` | Array of option objects |
| `options` | `array` | Array of option objects |
## Option Object Structure
@@ -291,7 +291,7 @@ Options can be simple (direct link) or nested (with another question):
1. Add your data source object to the JSON array
2. Ensure the logo exists in `public/Logos/`
3. Test the flow locally with `yarn dev`
3. Test the flow locally with `pnpm dev`
4. Validation:
- Navigate to the [onboarding page](http://localhost:3301/get-started-with-signoz-cloud) on your local machine
- Data source appears in the list
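
As an illustrative aside (not part of the diff): combining the keys documented in the tables above, a single entry in the configuration array could look like the sketch below. It is shown as a TypeScript object literal so comments can flag assumptions; the actual file is plain JSON, and every value here is hypothetical, modeled on the `aws-ec2` examples from the tables.

```ts
// Hypothetical entry (illustrative only) combining the documented keys.
const awsEc2Entry = {
	// Required keys:
	dataSource: 'aws-ec2', // unique kebab-case identifier
	label: 'AWS EC2', // display name shown to users
	tags: ['AWS'], // category tags for grouping
	module: 'infra-monitoring-hosts', // destination; see Module Values table
	imgUrl: '/Logos/ec2.svg', // SVG required
	// Optional keys:
	link: '/docs/aws-monitoring/ec2/',
	relatedSearchKeywords: ['ec2', 'compute'], // hypothetical keywords
	question: {
		desc: 'How is your EC2 workload instrumented?', // hypothetical text
		type: 'select', // only "select" is supported
		options: [], // option objects go here
	},
};
```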

View File

@@ -4,4 +4,5 @@ build
i18-generate-hash.js
src/parser/TraceOperatorParser/**
orval.config.ts
orval.config.ts
pnpm-lock.yaml

View File

@@ -41,7 +41,7 @@ module.exports = {
'jsx-a11y', // Accessibility rules
'import', // Import/export linting
'sonarjs', // Code quality/complexity
// TODO: Uncomment after running: yarn add -D eslint-plugin-spellcheck
// TODO: Uncomment after running: pnpm add -D eslint-plugin-spellcheck
// 'spellcheck', // Correct spellings
],
settings: {

View File

@@ -1,7 +1,7 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && yarn run commitlint --edit $1
cd frontend && pnpm run commitlint --edit $1
branch="$(git rev-parse --abbrev-ref HEAD)"

View File

@@ -1,4 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && yarn lint-staged
cd frontend && pnpm lint-staged

View File

@@ -12,4 +12,6 @@ public/
# Ignore all files in parser folder:
src/parser/**
src/TraceOperator/parser/**
src/TraceOperator/parser/**
pnpm-lock.yaml

View File

@@ -28,8 +28,8 @@ Follow the steps below
1. ```git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend```
1. change baseURL to ```<test environment URL>``` in file ```src/constants/env.ts```
1. ```yarn install```
1. ```yarn dev```
1. ```pnpm install```
1. ```pnpm dev```
```Note: Please ping us in the #contributing channel in our Slack community and we will DM you with <test environment URL>```
@@ -41,7 +41,7 @@ This project was bootstrapped with [Create React App](https://github.com/faceboo
In the project directory, you can run:
### `yarn start`
### `pnpm start`
Runs the app in the development mode.\
Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
@@ -49,12 +49,12 @@ Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
The page will reload if you make edits.\
You will also see any lint errors in the console.
### `yarn test`
### `pnpm test`
Launches the test runner in the interactive watch mode.\
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
### `yarn build`
### `pnpm build`
Builds the app for production to the `build` folder.\
It correctly bundles React in production mode and optimizes the build for the best performance.
@@ -64,7 +64,7 @@ Your app is ready to be deployed!
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
### `yarn eject`
### `pnpm eject`
**Note: this is a one-way operation. Once you `eject`, you can't go back!**
@@ -100,6 +100,6 @@ This section has moved here: [https://facebook.github.io/create-react-app/docs/a
This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)
### `yarn build` fails to minify
### `pnpm build` fails to minify
This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)

View File

@@ -6,6 +6,7 @@
* Adds custom matchers from the react testing library to all tests
*/
import '@testing-library/jest-dom';
import '@testing-library/jest-dom/extend-expect';
import 'jest-styled-components';
import { server } from './src/mocks-server/server';

View File

@@ -80,7 +80,7 @@ export default defineConfig({
header: (info: { title: string; version: string }): string[] => [
`! Do not edit manually`,
`* The file has been auto-generated using Orval for SigNoz`,
`* regenerate with 'yarn generate:api'`,
`* regenerate with 'pnpm generate:api'`,
...(info.title ? [info.title] : []),
...(info.version ? [`OpenAPI spec version: ${info.version}`] : []),
],

View File

@@ -33,6 +33,7 @@
"@ant-design/icons": "4.8.0",
"@codemirror/autocomplete": "6.18.6",
"@codemirror/lang-javascript": "6.2.3",
"@codemirror/state": "6.5.4",
"@dnd-kit/core": "6.1.0",
"@dnd-kit/modifiers": "7.0.0",
"@dnd-kit/sortable": "8.0.0",
@@ -40,7 +41,7 @@
"@grafana/data": "^11.2.3",
"@mdx-js/loader": "2.3.0",
"@mdx-js/react": "2.3.0",
"@monaco-editor/react": "^4.3.1",
"@monaco-editor/react": "~4.3.1",
"@playwright/test": "1.55.1",
"@radix-ui/react-tabs": "1.0.4",
"@radix-ui/react-tooltip": "1.0.7",
@@ -97,7 +98,7 @@
"color-alpha": "2.0.0",
"cross-env": "^7.0.3",
"crypto-js": "4.2.0",
"d3-hierarchy": "3.1.2",
"d3-hierarchy": "1.1.9",
"dayjs": "^1.10.7",
"dompurify": "3.2.4",
"dotenv": "8.2.0",
@@ -122,6 +123,7 @@
"overlayscrollbars-react": "^0.5.6",
"papaparse": "5.4.1",
"posthog-js": "1.298.0",
"rc-select": "~14.10.0",
"rc-tween-one": "3.0.6",
"react": "18.2.0",
"react-addons-update": "15.6.3",
@@ -182,18 +184,22 @@
"@babel/preset-env": "^7.22.14",
"@babel/preset-react": "^7.12.13",
"@babel/preset-typescript": "^7.21.4",
"@codemirror/view": "~6.5.1",
"@commitlint/cli": "^20.4.2",
"@commitlint/config-conventional": "^20.4.2",
"@faker-js/faker": "9.3.0",
"@jest/globals": "30.2.0",
"@jest/types": "^30.3.0",
"@testing-library/jest-dom": "5.16.5",
"@testing-library/react": "13.4.0",
"@testing-library/user-event": "14.4.3",
"@types/color": "^3.0.3",
"@types/crypto-js": "4.2.2",
"@types/d3-hierarchy": "1.1.11",
"@types/dompurify": "^2.4.0",
"@types/event-source-polyfill": "^1.0.0",
"@types/fontfaceobserver": "2.1.0",
"@types/history": "^4.7.11",
"@types/jest": "30.0.0",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
@@ -212,6 +218,7 @@
"@types/react-syntax-highlighter": "15.5.13",
"@types/redux-mock-store": "1.0.4",
"@types/styled-components": "^5.1.4",
"@types/testing-library__jest-dom": "^5.14.5",
"@types/uuid": "^8.3.1",
"@typescript-eslint/eslint-plugin": "^4.33.0",
"@typescript-eslint/parser": "^4.33.0",
@@ -227,6 +234,7 @@
"eslint-plugin-react-hooks": "^4.3.0",
"eslint-plugin-simple-import-sort": "^7.0.0",
"eslint-plugin-sonarjs": "^0.12.0",
"glob": "^13.0.6",
"husky": "^7.0.4",
"imagemin": "^8.0.1",
"imagemin-svgo": "^10.0.1",
@@ -248,6 +256,7 @@
"sass": "1.97.3",
"sharp": "0.34.5",
"svgo": "4.0.0",
"tailwindcss": "3",
"ts-api-utils": "2.4.0",
"ts-jest": "29.4.6",
"ts-node": "^10.2.1",
@@ -281,6 +290,32 @@
"brace-expansion": "^2.0.2",
"on-headers": "^1.1.0",
"tmp": "0.2.4",
"vite": "npm:rolldown-vite@7.3.1"
"vite": "npm:rolldown-vite@7.3.1",
"@codemirror/view": "~6.5.1",
"@types/d3-hierarchy": "1.1.11"
},
"pnpm": {
"overrides": {
"@types/react": "18.0.26",
"@types/react-dom": "18.0.10",
"debug": "4.3.4",
"semver": "7.5.4",
"xml2js": "0.5.0",
"phin": "^3.7.1",
"body-parser": "1.20.3",
"http-proxy-middleware": "3.0.5",
"cross-spawn": "7.0.5",
"cookie": "^0.7.1",
"serialize-javascript": "6.0.2",
"prismjs": "1.30.0",
"got": "11.8.5",
"form-data": "4.0.4",
"brace-expansion": "^2.0.2",
"on-headers": "^1.1.0",
"tmp": "0.2.4",
"vite": "npm:rolldown-vite@7.3.1",
"@codemirror/view": "~6.5.1",
"@types/d3-hierarchy": "1.1.11"
}
}
}
}

View File

@@ -1,9 +1,11 @@
import { defineConfig, devices } from '@playwright/test';
import dotenv from 'dotenv';
import path from 'path';
import { fileURLToPath } from 'url';
// Read from ".env" file.
dotenv.config({ path: path.resolve(__dirname, '.env') });
const dirname = path.dirname(fileURLToPath(import.meta.url));
dotenv.config({ path: path.resolve(dirname, '.env') });
/**
* Read environment variables from file.
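
For context on the hunk above: `__dirname` is a CommonJS global and does not exist in ES modules, so the config now derives an equivalent from `import.meta.url`. A minimal standalone sketch of the pattern (not SigNoz code, names are hypothetical):

```ts
import path from 'path';
import { fileURLToPath } from 'url';

// ESM has no __dirname; recover the directory of the current module
// from its file:// URL instead.
const dirname = path.dirname(fileURLToPath(import.meta.url));

// Resolve a sibling file relative to this module, e.g. a .env file.
const envPath = path.resolve(dirname, '.env');
console.log(envPath);
```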

frontend/pnpm-lock.yaml (generated, new file, 23271 additions)

File diff suppressed because it is too large

View File

@@ -180,7 +180,7 @@ async function main() {
PERMISSIONS_TYPE_FILE,
);
log('Linting generated file...');
execSync(`cd frontend && yarn eslint --fix ${relativePath}`, {
execSync(`cd frontend && pnpm eslint --fix ${relativePath}`, {
cwd: rootDir,
stdio: 'inherit',
});

View File

@@ -25,7 +25,7 @@ echo "\n✅ Prettier formatting successful"
# Fix linting issues
echo "\n\n---\nRunning eslint...\n"
if ! yarn lint --fix --quiet src/api/generated; then
if ! pnpm lint --fix --quiet src/api/generated; then
echo "ESLint check failed! Please fix linting errors before proceeding."
exit 1
fi

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
export interface AuthtypesAttributeMappingDTO {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* * regenerate with 'pnpm generate:api'
* SigNoz
*/
import type {

View File

@@ -30,7 +30,7 @@
"name": "typescript-plugin-css-modules"
}
],
"types": ["vite/client", "node", "jest"]
"types": ["vite/client", "node", "jest", "testing-library__jest-dom"]
},
"exclude": ["node_modules", "src/parser/*.ts", "src/parser/TraceOperatorParser/*.ts", "orval.config.ts"],
"include": [

View File

@@ -12,6 +12,11 @@
resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.3.tgz#90749bde8b89cd41764224f5aac29cd4138f75ff"
integrity sha512-rE0Pygv0sEZ4vBWHlAgJLGDU7Pm8xoO6p3wsEceb7GYAjScrOHpEo8KK/eVkAcnSM+slAEtXjA2JpdjLp4fJQQ==
"@alloc/quick-lru@^5.2.0":
version "5.2.0"
resolved "https://registry.yarnpkg.com/@alloc/quick-lru/-/quick-lru-5.2.0.tgz#7bf68b20c0a350f936915fcae06f58e32007ce30"
integrity sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==
"@ampproject/remapping@^2.2.0":
version "2.2.1"
resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630"
@@ -2549,7 +2554,14 @@
"@codemirror/view" "^6.0.0"
crelt "^1.0.5"
"@codemirror/state@^6.0.0", "@codemirror/state@^6.1.1", "@codemirror/state@^6.4.0", "@codemirror/state@^6.5.0":
"@codemirror/state@6.5.4", "@codemirror/state@^6.1.4":
version "6.5.4"
resolved "https://registry.yarnpkg.com/@codemirror/state/-/state-6.5.4.tgz#f5be4b8c0d2310180d5f15a9f641c21ca69faf19"
integrity sha512-8y7xqG/hpB53l25CIoit9/ngxdfoG+fx+V3SHBrinnhOtLvKHRyAJJuHzkWrR4YXXLX8eXBsejgAAxHUOdW1yw==
dependencies:
"@marijn/find-cluster-break" "^1.0.0"
"@codemirror/state@^6.0.0", "@codemirror/state@^6.1.1", "@codemirror/state@^6.4.0":
version "6.5.2"
resolved "https://registry.yarnpkg.com/@codemirror/state/-/state-6.5.2.tgz#8eca3a64212a83367dc85475b7d78d5c9b7076c6"
integrity sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==
@@ -2566,13 +2578,13 @@
"@codemirror/view" "^6.0.0"
"@lezer/highlight" "^1.0.0"
"@codemirror/view@^6.0.0", "@codemirror/view@^6.17.0", "@codemirror/view@^6.23.0", "@codemirror/view@^6.27.0", "@codemirror/view@^6.35.0":
version "6.36.6"
resolved "https://registry.yarnpkg.com/@codemirror/view/-/view-6.36.6.tgz#735a6431caed0c2c7d26c645066b02f10e802812"
integrity sha512-uxugGLet+Nzp0Jcit8Hn3LypM8ioMLKTsdf8FRoT3HWvZtb9GhaWMe0Cc15rz90Ljab4YFJiAulmIVB74OY0IQ==
"@codemirror/view@^6.0.0", "@codemirror/view@^6.17.0", "@codemirror/view@^6.23.0", "@codemirror/view@^6.27.0", "@codemirror/view@^6.35.0", "@codemirror/view@~6.5.1":
version "6.5.1"
resolved "https://registry.yarnpkg.com/@codemirror/view/-/view-6.5.1.tgz#f4533cd796f0569508822d0012bee9a15dc4b3f9"
integrity sha512-xBKP8N3AXOs06VcKvIuvIQoUlGs7Hb78ftJWahLaRX909jKPMgGxR5XjvrawzTTZMSTU3DzdjDNPwG6fPM/ypQ==
dependencies:
"@codemirror/state" "^6.5.0"
style-mod "^4.1.0"
"@codemirror/state" "^6.1.4"
style-mod "^4.0.0"
w3c-keyname "^2.2.4"
"@commander-js/extra-typings@^14.0.0":
@@ -3762,6 +3774,19 @@
"@types/yargs" "^17.0.8"
chalk "^4.0.0"
"@jest/types@^30.3.0":
version "30.3.0"
resolved "https://registry.yarnpkg.com/@jest/types/-/types-30.3.0.tgz#cada800d323cb74945c24ac74615fdb312a6c85f"
integrity sha512-JHm87k7bA33hpBngtU8h6UBub/fqqA9uXfw+21j5Hmk7ooPHlboRNxHq0JcMtC+n8VJGP1mcfnD3Mk+XKe1oSw==
dependencies:
"@jest/pattern" "30.0.1"
"@jest/schemas" "30.0.5"
"@types/istanbul-lib-coverage" "^2.0.6"
"@types/istanbul-reports" "^3.0.4"
"@types/node" "*"
"@types/yargs" "^17.0.33"
chalk "^4.1.2"
"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
version "0.3.3"
resolved "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz"
@@ -4000,19 +4025,20 @@
"@types/mdx" "^2.0.0"
"@types/react" ">=16"
"@monaco-editor/loader@^1.3.3":
version "1.3.3"
resolved "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.3.3.tgz"
integrity sha512-6KKF4CTzcJiS8BJwtxtfyYt9shBiEv32ateQ9T4UVogwn4HM/uPo9iJd2Dmbkpz8CM6Y0PDUpjnZzCwC+eYo2Q==
"@monaco-editor/loader@^1.2.0":
version "1.7.0"
resolved "https://registry.yarnpkg.com/@monaco-editor/loader/-/loader-1.7.0.tgz#967aaa4601b19e913627688dfe8159d57549e793"
integrity sha512-gIwR1HrJrrx+vfyOhYmCZ0/JcWqG5kbfG7+d3f/C1LXk2EvzAbHSg3MQ5lO2sMlo9izoAZ04shohfKLVT6crVA==
dependencies:
state-local "^1.0.6"
"@monaco-editor/react@^4.3.1":
version "4.5.0"
resolved "https://registry.npmjs.org/@monaco-editor/react/-/react-4.5.0.tgz"
integrity sha512-VJMkp5Fe1+w8pLEq8tZPHZKu8zDXQIA1FtiDTSNccg1D3wg1YIZaH2es2Qpvop1k62g3c/YySRb3bnGXu2XwYQ==
"@monaco-editor/react@~4.3.1":
version "4.3.1"
resolved "https://registry.yarnpkg.com/@monaco-editor/react/-/react-4.3.1.tgz#d65bcbf174c39b6d4e7fec43d0cddda82b70a12a"
integrity sha512-f+0BK1PP/W5I50hHHmwf11+Ea92E5H1VZXs+wvKplWUWOfyMa1VVwqkJrXjRvbcqHL+XdIGYWhWNdi4McEvnZg==
dependencies:
"@monaco-editor/loader" "^1.3.3"
"@monaco-editor/loader" "^1.2.0"
prop-types "^15.7.2"
"@mswjs/cookies@^0.2.2":
version "0.2.2"
@@ -6199,7 +6225,7 @@
dependencies:
"@types/geojson" "*"
"@types/d3-hierarchy@^1.1.6":
"@types/d3-hierarchy@1.1.11", "@types/d3-hierarchy@^1.1.6":
version "1.1.11"
resolved "https://registry.yarnpkg.com/@types/d3-hierarchy/-/d3-hierarchy-1.1.11.tgz#c3bd70d025621f73cb3319e97e08ae4c9051c791"
integrity sha512-lnQiU7jV+Gyk9oQYk0GGYccuexmQPTp08E0+4BidgFdiJivjEvf+esPSdZqCZ2C7UwTWejWpqetVaU8A+eX3FA==
@@ -6704,6 +6730,13 @@
"@types/react" "*"
csstype "^3.0.2"
"@types/testing-library__jest-dom@^5.14.5":
version "5.14.9"
resolved "https://registry.yarnpkg.com/@types/testing-library__jest-dom/-/testing-library__jest-dom-5.14.9.tgz#0fb1e6a0278d87b6737db55af5967570b67cb466"
integrity sha512-FSYhIjFlfOpGSRyVoMBMuS3ws5ehFQODymf3vlI7U1K8c7PHwWwFY7VREfmsuzHSOnoKs/9/Y983ayOs7eRzqw==
dependencies:
"@types/jest" "*"
"@types/testing-library__jest-dom@^5.9.1":
version "5.14.5"
resolved "https://registry.npmjs.org/@types/testing-library__jest-dom/-/testing-library__jest-dom-5.14.5.tgz"
@@ -7510,6 +7543,11 @@ antlr4@4.13.2:
resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.2.tgz#0d084ad0e32620482a9c3a0e2470c02e72e4006d"
integrity sha512-QiVbZhyy4xAZ17UPEuG3YTOt8ZaoeOR1CvEAqrEsDBsOqINslaB147i9xqljZqoyf5S+EUlGStaj+t22LT9MOg==
any-promise@^1.0.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f"
integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==
anymatch@^3.0.3, anymatch@^3.1.3, anymatch@~3.1.2:
version "3.1.3"
resolved "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz"
@@ -7523,6 +7561,11 @@ arg@^4.1.0:
resolved "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz"
integrity sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==
arg@^5.0.2:
version "5.0.2"
resolved "https://registry.yarnpkg.com/arg/-/arg-5.0.2.tgz#c81433cc427c92c4dcf4865142dbca6f15acd59c"
integrity sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==
argparse@^1.0.7:
version "1.0.10"
resolved "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz"
@@ -8514,6 +8557,11 @@ camel-case@^4.1.2:
pascal-case "^3.1.2"
tslib "^2.0.3"
camelcase-css@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5"
integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==
camelcase-keys@^6.2.2:
version "6.2.2"
resolved "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz"
@@ -8652,7 +8700,7 @@ chartjs-plugin-annotation@^1.4.0:
resolved "https://registry.npmjs.org/chartjs-plugin-annotation/-/chartjs-plugin-annotation-1.4.0.tgz"
integrity sha512-OC0eGoVvdxTtGGi8mV3Dr+G1YmMhtYYQWqGMb2uWcgcnyiBslaRKPofKwAYWPbh7ABnmQNsNDQLIKPH+XiaZLA==
chokidar@^3.4.2, chokidar@^3.5.3:
chokidar@^3.4.2, chokidar@^3.5.3, chokidar@^3.6.0:
version "3.6.0"
resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b"
integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==
@@ -8922,6 +8970,11 @@ commander@^14.0.1:
resolved "https://registry.yarnpkg.com/commander/-/commander-14.0.2.tgz#b71fd37fe4069e4c3c7c13925252ada4eba14e8e"
integrity sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==
commander@^4.0.0:
version "4.1.1"
resolved "https://registry.yarnpkg.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068"
integrity sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==
commander@^7.2.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7"
@@ -9393,12 +9446,7 @@ d3-geo@3.1.0:
dependencies:
d3-array "2.5.0 - 3"
d3-hierarchy@3.1.2:
version "3.1.2"
resolved "https://registry.yarnpkg.com/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz#b01cd42c1eed3d46db77a5966cf726f8c09160c6"
integrity sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==
d3-hierarchy@^1.1.4:
d3-hierarchy@1.1.9, d3-hierarchy@^1.1.4:
version "1.1.9"
resolved "https://registry.yarnpkg.com/d3-hierarchy/-/d3-hierarchy-1.1.9.tgz#2f6bee24caaea43f8dc37545fa01628559647a83"
integrity sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==
@@ -9781,6 +9829,11 @@ devlop@^1.0.0:
dependencies:
dequal "^2.0.0"
didyoumean@^1.2.2:
version "1.2.2"
resolved "https://registry.yarnpkg.com/didyoumean/-/didyoumean-1.2.2.tgz#989346ffe9e839b4555ecf5666edea0d3e8ad037"
integrity sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==
diff-sequences@^29.4.3:
version "29.4.3"
resolved "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz"
@@ -9808,6 +9861,11 @@ direction@^2.0.0:
resolved "https://registry.yarnpkg.com/direction/-/direction-2.0.1.tgz#71800dd3c4fa102406502905d3866e65bdebb985"
integrity sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA==
dlv@^1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/dlv/-/dlv-1.1.3.tgz#5c198a8a11453596e751494d49874bc7732f2e79"
integrity sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==
dnd-core@^16.0.1:
version "16.0.1"
resolved "https://registry.yarnpkg.com/dnd-core/-/dnd-core-16.0.1.tgz#a1c213ed08961f6bd1959a28bb76f1a868360d19"
@@ -10804,7 +10862,7 @@ fast-diff@^1.1.2:
resolved "https://registry.npmjs.org/fast-diff/-/fast-diff-1.2.0.tgz"
integrity sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==
fast-glob@^3.2.11, fast-glob@^3.2.7:
fast-glob@^3.2.11, fast-glob@^3.2.7, fast-glob@^3.3.2:
version "3.3.3"
resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818"
integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==
@@ -11339,6 +11397,13 @@ glob-parent@^5.1.2, glob-parent@~5.1.2:
dependencies:
is-glob "^4.0.1"
glob-parent@^6.0.2:
version "6.0.2"
resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3"
integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==
dependencies:
is-glob "^4.0.3"
glob@^10.3.10:
version "10.5.0"
resolved "https://registry.yarnpkg.com/glob/-/glob-10.5.0.tgz#8ec0355919cd3338c28428a23d4f24ecc5fe738c"
@@ -11351,6 +11416,15 @@ glob@^10.3.10:
package-json-from-dist "^1.0.0"
path-scurry "^1.11.1"
glob@^13.0.6:
version "13.0.6"
resolved "https://registry.yarnpkg.com/glob/-/glob-13.0.6.tgz#078666566a425147ccacfbd2e332deb66a2be71d"
integrity sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==
dependencies:
minimatch "^10.2.2"
minipass "^7.1.3"
path-scurry "^2.0.2"
glob@^7.1.3, glob@^7.1.4, glob@^7.1.6:
version "7.2.3"
resolved "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz"
@@ -13395,6 +13469,11 @@ jest@30.2.0:
import-local "^3.2.0"
jest-cli "30.2.0"
jiti@^1.21.7:
version "1.21.7"
resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.7.tgz#9dd81043424a3d28458b193d965f0d18a2300ba9"
integrity sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==
jiti@^2.6.1:
version "2.6.1"
resolved "https://registry.yarnpkg.com/jiti/-/jiti-2.6.1.tgz#178ef2fc9a1a594248c20627cd820187a4d78d92"
@@ -13779,6 +13858,11 @@ lilconfig@^2.0.5:
resolved "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz"
integrity sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==
lilconfig@^3.1.1, lilconfig@^3.1.3:
version "3.1.3"
resolved "https://registry.yarnpkg.com/lilconfig/-/lilconfig-3.1.3.tgz#a1bcfd6257f9585bf5ae14ceeebb7b559025e4c4"
integrity sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==
lines-and-columns@^1.1.6:
version "1.2.4"
resolved "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz"
@@ -14036,6 +14120,11 @@ lru-cache@^10.2.0:
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119"
integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==
lru-cache@^11.0.0:
version "11.2.6"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-11.2.6.tgz#356bf8a29e88a7a2945507b31f6429a65a192c58"
integrity sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==
lru-cache@^5.1.1:
version "5.1.1"
resolved "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz"
@@ -14921,6 +15010,13 @@ minimatch@3.1.2, minimatch@^3.0.4, minimatch@^3.1.1, minimatch@^3.1.2:
dependencies:
brace-expansion "^1.1.7"
minimatch@^10.2.2:
version "10.2.4"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-10.2.4.tgz#465b3accbd0218b8281f5301e27cedc697f96fde"
integrity sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==
dependencies:
brace-expansion "^5.0.2"
minimatch@^5.0.1:
version "5.1.6"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96"
@@ -14980,7 +15076,7 @@ minipass@^4.2.4:
resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.0.4.tgz#dbce03740f50a4786ba994c1fb908844d27b038c"
integrity sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==
minipass@^7.1.2:
minipass@^7.1.2, minipass@^7.1.3:
version "7.1.3"
resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.3.tgz#79389b4eb1bb2d003a9bba87d492f2bd37bdc65b"
integrity sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==
@@ -15062,6 +15158,15 @@ mute-stream@0.0.8:
resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d"
integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==
mz@^2.7.0:
version "2.7.0"
resolved "https://registry.yarnpkg.com/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32"
integrity sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==
dependencies:
any-promise "^1.0.0"
object-assign "^4.0.1"
thenify-all "^1.0.0"
nano-css@^5.3.1:
version "5.3.5"
resolved "https://registry.npmjs.org/nano-css/-/nano-css-5.3.5.tgz"
@@ -15343,11 +15448,16 @@ oas-validator@^5.0.8:
should "^13.2.1"
yaml "^1.10.0"
object-assign@^4.1.0, object-assign@^4.1.1:
object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1:
version "4.1.1"
resolved "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz"
integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==
object-hash@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/object-hash/-/object-hash-3.0.0.tgz#73f97f753e7baffc0e2cc9d6e079079744ac82e9"
integrity sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==
object-inspect@^1.12.2, object-inspect@^1.12.3, object-inspect@^1.9.0:
version "1.12.3"
resolved "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz"
@@ -15843,6 +15953,14 @@ path-scurry@^1.6.1:
lru-cache "^9.1.1 || ^10.0.0"
minipass "^5.0.0 || ^6.0.2 || ^7.0.0"
path-scurry@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-2.0.2.tgz#6be0d0ee02a10d9e0de7a98bae65e182c9061f85"
integrity sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==
dependencies:
lru-cache "^11.0.0"
minipass "^7.1.2"
path-to-regexp@^1.7.0:
version "1.9.0"
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.9.0.tgz#5dc0753acbf8521ca2e0f137b4578b917b10cf24"
@@ -15936,6 +16054,11 @@ pidtree@^0.5.0:
resolved "https://registry.npmjs.org/pidtree/-/pidtree-0.5.0.tgz"
integrity sha512-9nxspIM7OpZuhBxPg73Zvyq7j1QMPMPsGKTqRc2XOaFQauDvoNz9fM1Wdkjmeo7l9GXOZiRs97sPkuayl39wjA==
pify@^2.3.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c"
integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==
pify@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176"
@@ -15946,16 +16069,16 @@ pify@^4.0.1:
resolved "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz"
integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==
pirates@^4.0.1, pirates@^4.0.7:
version "4.0.7"
resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.7.tgz#643b4a18c4257c8a65104b73f3049ce9a0a15e22"
integrity sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==
pirates@^4.0.4:
version "4.0.5"
resolved "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz"
integrity sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==
pirates@^4.0.7:
version "4.0.7"
resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.7.tgz#643b4a18c4257c8a65104b73f3049ce9a0a15e22"
integrity sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==
pkg-dir@^4.2.0:
version "4.2.0"
resolved "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz"
@@ -16010,6 +16133,22 @@ possible-typed-array-names@^1.0.0:
resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz#93e3582bc0e5426586d9d07b79ee40fc841de4ae"
integrity sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==
postcss-import@^15.1.0:
version "15.1.0"
resolved "https://registry.yarnpkg.com/postcss-import/-/postcss-import-15.1.0.tgz#41c64ed8cc0e23735a9698b3249ffdbf704adc70"
integrity sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==
dependencies:
postcss-value-parser "^4.0.0"
read-cache "^1.0.0"
resolve "^1.1.7"
postcss-js@^4.0.1:
version "4.1.0"
resolved "https://registry.yarnpkg.com/postcss-js/-/postcss-js-4.1.0.tgz#003b63c6edde948766e40f3daf7e997ae43a5ce6"
integrity sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==
dependencies:
camelcase-css "^2.0.1"
postcss-load-config@^3.1.4:
version "3.1.4"
resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-3.1.4.tgz#1ab2571faf84bb078877e1d07905eabe9ebda855"
@@ -16018,6 +16157,13 @@ postcss-load-config@^3.1.4:
lilconfig "^2.0.5"
yaml "^1.10.2"
"postcss-load-config@^4.0.2 || ^5.0 || ^6.0":
version "6.0.1"
resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-6.0.1.tgz#6fd7dcd8ae89badcf1b2d644489cbabf83aa8096"
integrity sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==
dependencies:
lilconfig "^3.1.1"
postcss-modules-extract-imports@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz#cda1f047c0ae80c97dbe28c3e76a43b88025741d"
@@ -16039,6 +16185,21 @@ postcss-modules-scope@^3.1.1:
dependencies:
postcss-selector-parser "^7.0.0"
postcss-nested@^6.2.0:
version "6.2.0"
resolved "https://registry.yarnpkg.com/postcss-nested/-/postcss-nested-6.2.0.tgz#4c2d22ab5f20b9cb61e2c5c5915950784d068131"
integrity sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==
dependencies:
postcss-selector-parser "^6.1.1"
postcss-selector-parser@^6.1.1, postcss-selector-parser@^6.1.2:
version "6.1.2"
resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz#27ecb41fb0e3b6ba7a1ec84fff347f734c7929de"
integrity sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==
dependencies:
cssesc "^3.0.0"
util-deprecate "^1.0.2"
postcss-selector-parser@^7.0.0:
version "7.1.0"
resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz#4d6af97eba65d73bc4d84bcb343e865d7dd16262"
@@ -16047,7 +16208,7 @@ postcss-selector-parser@^7.0.0:
cssesc "^3.0.0"
util-deprecate "^1.0.2"
postcss-value-parser@^4.0.2, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0:
postcss-value-parser@^4.0.0, postcss-value-parser@^4.0.2, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0:
version "4.2.0"
resolved "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz"
integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==
@@ -16070,6 +16231,15 @@ postcss@^8.0.0:
picocolors "^1.0.0"
source-map-js "^1.2.0"
postcss@^8.4.47:
version "8.5.8"
resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.8.tgz#6230ecc8fb02e7a0f6982e53990937857e13f399"
integrity sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==
dependencies:
nanoid "^3.3.11"
picocolors "^1.1.1"
source-map-js "^1.2.1"
posthog-js@1.298.0:
version "1.298.0"
resolved "https://registry.yarnpkg.com/posthog-js/-/posthog-js-1.298.0.tgz#54730988a753220aef54af0c4e69960633917450"
@@ -17100,6 +17270,13 @@ react@18.2.0:
dependencies:
loose-envify "^1.1.0"
read-cache@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/read-cache/-/read-cache-1.0.0.tgz#e664ef31161166c9751cdbe8dbcf86b5fb58f774"
integrity sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==
dependencies:
pify "^2.3.0"
read-pkg-up@^7.0.1:
version "7.0.1"
resolved "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz"
@@ -17586,6 +17763,15 @@ resolve-protobuf-schema@^2.1.0:
dependencies:
protocol-buffers-schema "^3.3.1"
resolve@^1.1.7, resolve@^1.12.0, resolve@^1.22.11, resolve@^1.22.4, resolve@^1.22.8:
version "1.22.11"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.11.tgz#aad857ce1ffb8bfa9b0b1ac29f1156383f68c262"
integrity sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==
dependencies:
is-core-module "^2.16.1"
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
resolve@^1.10.0, resolve@^1.14.2, resolve@^1.19.0:
version "1.22.2"
resolved "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz"
@@ -17595,15 +17781,6 @@ resolve@^1.10.0, resolve@^1.14.2, resolve@^1.19.0:
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
resolve@^1.12.0, resolve@^1.22.11, resolve@^1.22.4:
version "1.22.11"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.11.tgz#aad857ce1ffb8bfa9b0b1ac29f1156383f68c262"
integrity sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==
dependencies:
is-core-module "^2.16.1"
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
resolve@^2.0.0-next.5:
version "2.0.0-next.5"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.5.tgz#6b0ec3107e671e52b68cd068ef327173b90dc03c"
@@ -18626,7 +18803,7 @@ strtok3@^6.2.4:
"@tokenizer/token" "^0.3.0"
peek-readable "^4.1.0"
style-mod@^4.0.0, style-mod@^4.1.0:
style-mod@^4.0.0:
version "4.1.2"
resolved "https://registry.yarnpkg.com/style-mod/-/style-mod-4.1.2.tgz#ca238a1ad4786520f7515a8539d5a63691d7bf67"
integrity sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==
@@ -18680,6 +18857,19 @@ stylus@^0.62.0:
sax "~1.3.0"
source-map "^0.7.3"
sucrase@^3.35.0:
version "3.35.1"
resolved "https://registry.yarnpkg.com/sucrase/-/sucrase-3.35.1.tgz#4619ea50393fe8bd0ae5071c26abd9b2e346bfe1"
integrity sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==
dependencies:
"@jridgewell/gen-mapping" "^0.3.2"
commander "^4.0.0"
lines-and-columns "^1.1.6"
mz "^2.7.0"
pirates "^4.0.1"
tinyglobby "^0.2.11"
ts-interface-checker "^0.1.9"
supports-color@^5.3.0, supports-color@^5.5.0:
version "5.5.0"
resolved "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz"
@@ -18802,6 +18992,34 @@ tailwindcss-animate@^1.0.7:
resolved "https://registry.yarnpkg.com/tailwindcss-animate/-/tailwindcss-animate-1.0.7.tgz#318b692c4c42676cc9e67b19b78775742388bef4"
integrity sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==
tailwindcss@3:
version "3.4.19"
resolved "https://registry.yarnpkg.com/tailwindcss/-/tailwindcss-3.4.19.tgz#af2a0a4ae302d52ebe078b6775e799e132500ee2"
integrity sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==
dependencies:
"@alloc/quick-lru" "^5.2.0"
arg "^5.0.2"
chokidar "^3.6.0"
didyoumean "^1.2.2"
dlv "^1.1.3"
fast-glob "^3.3.2"
glob-parent "^6.0.2"
is-glob "^4.0.3"
jiti "^1.21.7"
lilconfig "^3.1.3"
micromatch "^4.0.8"
normalize-path "^3.0.0"
object-hash "^3.0.0"
picocolors "^1.1.1"
postcss "^8.4.47"
postcss-import "^15.1.0"
postcss-js "^4.0.1"
postcss-load-config "^4.0.2 || ^5.0 || ^6.0"
postcss-nested "^6.2.0"
postcss-selector-parser "^6.1.2"
resolve "^1.22.8"
sucrase "^3.35.0"
tapable@^2.2.1:
version "2.3.0"
resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.3.0.tgz#7e3ea6d5ca31ba8e078b560f0d83ce9a14aa8be6"
@@ -18836,6 +19054,20 @@ text-table@^0.2.0:
resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==
thenify-all@^1.0.0:
version "1.6.0"
resolved "https://registry.yarnpkg.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726"
integrity sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==
dependencies:
thenify ">= 3.1.0 < 4"
"thenify@>= 3.1.0 < 4":
version "3.3.1"
resolved "https://registry.yarnpkg.com/thenify/-/thenify-3.3.1.tgz#8932e686a4066038a016dd9e2ca46add9838a95f"
integrity sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==
dependencies:
any-promise "^1.0.0"
throttle-debounce@^3.0.1:
version "3.0.1"
resolved "https://registry.npmjs.org/throttle-debounce/-/throttle-debounce-3.0.1.tgz"
@@ -18888,7 +19120,7 @@ tinyexec@^1.0.0:
resolved "https://registry.yarnpkg.com/tinyexec/-/tinyexec-1.0.2.tgz#bdd2737fe2ba40bd6f918ae26642f264b99ca251"
integrity sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==
tinyglobby@^0.2.15:
tinyglobby@^0.2.11, tinyglobby@^0.2.15:
version "0.2.15"
resolved "https://registry.yarnpkg.com/tinyglobby/-/tinyglobby-0.2.15.tgz#e228dd1e638cea993d2fdb4fcd2d4602a79951c2"
integrity sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==
@@ -18990,6 +19222,11 @@ ts-easing@^0.2.0:
resolved "https://registry.npmjs.org/ts-easing/-/ts-easing-0.2.0.tgz"
integrity sha512-Z86EW+fFFh/IFB1fqQ3/+7Zpf9t2ebOAxNI/V6Wo7r5gqiqtxmgTlQ1qbqQcjLKYeSHPTsEmvlJUDg/EuL0uHQ==
ts-interface-checker@^0.1.9:
version "0.1.13"
resolved "https://registry.yarnpkg.com/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz#784fd3d679722bc103b1b4b8030bcddb5db2a699"
integrity sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==
ts-jest@29.4.6:
version "29.4.6"
resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.4.6.tgz#51cb7c133f227396818b71297ad7409bb77106e9"

View File

@@ -69,7 +69,6 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
key string // deterministic join of label values
}
seriesMap := map[sKey]*qbtypes.TimeSeries{}
var keyOrder []sKey // preserves ClickHouse row-arrival order
stepMs := uint64(step.Duration.Milliseconds())
@@ -220,7 +219,6 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
if !ok {
series = &qbtypes.TimeSeries{Labels: lblObjs}
seriesMap[key] = series
keyOrder = append(keyOrder, key)
}
series.Values = append(series.Values, &qbtypes.TimeSeriesValue{
Timestamp: ts,
@@ -252,8 +250,8 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
Alias: "__result_" + strconv.Itoa(i),
}
}
for _, k := range keyOrder {
buckets[k.agg].Series = append(buckets[k.agg].Series, seriesMap[k])
for k, s := range seriesMap {
buckets[k.agg].Series = append(buckets[k.agg].Series, s)
}
var nonEmpty []*qbtypes.AggregationBucket
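
One consequence of the hunk above, worth noting: with `keyOrder` gone, buckets are filled in Go map iteration order, which the runtime intentionally randomizes from run to run. A minimal standalone sketch of that behavior (hypothetical values, not the production code):

```go
package main

import "fmt"

func main() {
	// Stand-in for seriesMap; keys are hypothetical series identifiers.
	seriesMap := map[string]int{"svc-a": 1, "svc-b": 2, "svc-c": 3}

	// Go deliberately randomizes map iteration order, so successive
	// runs may append these entries in different orders.
	for key, series := range seriesMap {
		fmt.Println(key, series)
	}
}
```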

View File

@@ -185,6 +185,22 @@ func postProcessMetricQuery(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
req *qbtypes.QueryRangeRequest,
) *qbtypes.Result {
config := query.Aggregations[0]
spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
for idx := range query.Order {
if query.Order[idx].Key.Name == spaceAggOrderBy ||
query.Order[idx].Key.Name == timeAggOrderBy ||
query.Order[idx].Key.Name == timeSpaceAggOrderBy {
query.Order[idx].Key.Name = qbtypes.DefaultOrderByKey
}
}
result = q.applySeriesLimit(result, query.Limit, query.Order)
if len(query.Functions) > 0 {
step := query.StepInterval.Duration.Milliseconds()
functions := q.prepareFillZeroArgsWithStep(query.Functions, req, step)
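
To make the new normalization concrete: for a hypothetical query with space aggregation `sum`, time aggregation `rate`, and metric `signoz_calls_total`, the three candidate order-by names are built like this (a sketch mirroring the `fmt.Sprintf` calls above, not the production code):

```go
package main

import "fmt"

func main() {
	// Hypothetical values standing in for query.Aggregations[0].
	spaceAgg, timeAgg, metric := "sum", "rate", "signoz_calls_total"

	spaceAggOrderBy := fmt.Sprintf("%s(%s)", spaceAgg, metric)                  // sum(signoz_calls_total)
	timeAggOrderBy := fmt.Sprintf("%s(%s)", timeAgg, metric)                    // rate(signoz_calls_total)
	timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", spaceAgg, timeAgg, metric) // sum(rate(signoz_calls_total))

	// Any order-by key matching one of these names is rewritten to the
	// builder's default order-by key before the series limit is applied.
	fmt.Println(spaceAggOrderBy, timeAggOrderBy, timeSpaceAggOrderBy)
}
```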

View File

@@ -132,14 +132,6 @@ func GroupByKeys(keys []qbtypes.GroupByKey) []string {
return k
}
func OrderByKeys(keys []qbtypes.OrderBy) []string {
k := []string{}
for _, key := range keys {
k = append(k, "`"+key.Key.Name+"`")
}
return k
}
func FormatValueForContains(value any) string {
if value == nil {
return ""

View File

@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
},
expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
},
expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
},
expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
},
expectedErr: nil,

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"log/slog"
"strings"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
@@ -547,16 +546,6 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
) (*qbtypes.Statement, error) {
metricType := query.Aggregations[0].Type
spaceAgg := query.Aggregations[0].SpaceAggregation
finalCTE := "__spatial_aggregation_cte"
if metricType == metrictypes.HistogramType {
histogramCTE, histogramCTEArgs, err := b.buildHistogramCTE(query)
if err != nil {
return nil, err
}
cteFragments = append(cteFragments, histogramCTE)
cteArgs = append(cteArgs, histogramCTEArgs)
finalCTE = "__histogram_cte"
}
combined := querybuilder.CombineCTEs(cteFragments)
@@ -566,97 +555,60 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("*")
sb.From(finalCTE)
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Where(rewrittenExpr)
}
groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
hasOrder := len(query.Order) > 0
hasLimit := query.Limit > 0
hasGroupBy := len(groupByKeys) > 0
if metricType == metrictypes.HistogramType && spaceAgg.IsPercentile() {
quantile := query.Aggregations[0].SpaceAggregation.Percentile()
sb.Select("ts")
for _, g := range query.GroupBy {
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
sb.SelectMore(fmt.Sprintf(
"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
quantile,
))
sb.From("__spatial_aggregation_cte")
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Having(rewrittenExpr)
}
} else if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
sb.Select("ts")
if !hasGroupBy {
// no group-by keys: limit and order-by have no effect on the result
} else if hasOrder && hasLimit {
labelSelectorSubQueryBuilder := sqlbuilder.NewSelectBuilder()
labelSelectorSubQueryBuilder.Select(groupByKeys...)
labelSelectorSubQueryBuilder.From(finalCTE)
labelSelectorOrderClauses := []string{}
orderedKeys := map[string]struct{}{} // tracks keys already ordered by; the remaining group-by keys are appended later as tie breakers
for _, o := range query.Order {
key := o.Key.Name
var clause string
if strings.Contains(key, query.Aggregations[0].MetricName) {
clause = fmt.Sprintf("avg(value) %s", o.Direction.StringValue())
} else {
clause = fmt.Sprintf("`%s` %s", key, o.Direction.StringValue())
orderedKeys[fmt.Sprintf("`%s`", key)] = struct{}{}
}
labelSelectorOrderClauses = append(labelSelectorOrderClauses, clause)
for _, g := range query.GroupBy {
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
for _, gk := range groupByKeys { // group-by keys not already covered by the order-by are appended as tie breakers
if _, ok := orderedKeys[gk]; !ok {
labelSelectorOrderClauses = append(labelSelectorOrderClauses, fmt.Sprintf("%s ASC", gk))
}
}
labelSelectorSubQueryBuilder.GroupBy(groupByKeys...)
labelSelectorSubQueryBuilder.OrderBy(labelSelectorOrderClauses...)
labelSelectorSubQuery, _ := labelSelectorSubQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
labelSelectorSubQuery = fmt.Sprintf("%s LIMIT %d", labelSelectorSubQuery, query.Limit)
sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), labelSelectorSubQuery))
for _, o := range query.Order {
key := o.Key.Name
var clause string
if strings.Contains(key, query.Aggregations[0].MetricName) {
clause = fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s", strings.Join(groupByKeys, ", "), o.Direction.StringValue())
} else {
clause = fmt.Sprintf("`%s` %s", key, o.Direction.StringValue())
}
sb.OrderBy(clause)
aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
if err != nil {
return nil, err
}
} else if hasOrder {
// order by without limit: apply order by clauses directly
for _, o := range query.Order {
key := o.Key.Name
if strings.Contains(key, query.Aggregations[0].MetricName) {
sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s", strings.Join(groupByKeys, ", "), o.Direction.StringValue()))
continue
}
sb.OrderBy(fmt.Sprintf("`%s` %s", o.Key.Name, o.Direction.StringValue()))
}
} else if hasLimit {
labelSelectorSubQueryBuilder := sqlbuilder.NewSelectBuilder()
labelSelectorSubQueryBuilder.Select(groupByKeys...)
labelSelectorSubQueryBuilder.From(finalCTE)
labelSelectorSubQueryBuilder.GroupBy(groupByKeys...)
labelSelectorSubQueryBuilder.OrderBy("avg(value) DESC")
labelSelectorSubQuery, _ := labelSelectorSubQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
labelSelectorSubQuery = fmt.Sprintf("%s LIMIT %d", labelSelectorSubQuery, query.Limit)
sb.SelectMore(aggQuery)
sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), labelSelectorSubQuery))
sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) DESC", strings.Join(groupByKeys, ", ")))
sb.From("__spatial_aggregation_cte")
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Having(rewrittenExpr)
}
} else {
// grouping without order by or limit: sort by avg(value) DESC with labels as tiebreakers
sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) DESC", strings.Join(groupByKeys, ", ")))
}
// add any group-by keys not already in the order-by as tiebreakers
orderKeySet := make(map[string]struct{})
for _, o := range query.Order {
orderKeySet[fmt.Sprintf("`%s`", o.Key.Name)] = struct{}{}
}
for _, g := range groupByKeys {
if _, exists := orderKeySet[g]; !exists {
sb.OrderBy(g)
// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
sb.Select("*")
sb.From("__spatial_aggregation_cte")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Where(rewrittenExpr)
}
}
sb.OrderBy("ts ASC")
sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.OrderBy("ts")
if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
sb.OrderBy("toFloat64(le)")
}
@@ -664,45 +616,3 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
}
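The trailing sb.OrderBy("toFloat64(le)") for the plain histogram-count case exists because le labels are stored as strings, and lexicographic order scrambles the buckets; a small illustrative snippet (not part of this change):

package main

import (
	"fmt"
	"sort"
	"strconv"
)

func main() {
	les := []string{"10", "2", "+Inf", "0.5"} // le labels arrive as strings
	lex := append([]string(nil), les...)
	sort.Strings(lex)
	fmt.Println(lex) // lexicographic: [+Inf 0.5 10 2], wrong bucket order
	sort.Slice(les, func(i, j int) bool {
		a, _ := strconv.ParseFloat(les[i], 64) // "+Inf" parses to +Inf
		b, _ := strconv.ParseFloat(les[j], 64)
		return a < b
	})
	fmt.Println(les) // numeric, like ORDER BY toFloat64(le): [0.5 2 10 +Inf]
}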
func (b *MetricQueryStatementBuilder) buildHistogramCTE(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
) (string, []any, error) {
spaceAgg := query.Aggregations[0].SpaceAggregation
histogramCTEQueryBuilder := sqlbuilder.NewSelectBuilder()
if spaceAgg.IsPercentile() {
histogramCTEQueryBuilder.Select("ts")
for _, g := range query.GroupBy {
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
quantile := spaceAgg.Percentile()
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf(
"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
quantile,
))
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
histogramCTEQueryBuilder.GroupBy("ts")
} else if spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
histogramCTEQueryBuilder.Select("ts")
for _, g := range query.GroupBy {
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
if err != nil {
return "", nil, err
}
histogramCTEQueryBuilder.SelectMore(aggQuery)
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
histogramCTEQueryBuilder.GroupBy("ts")
} else {
// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
histogramCTEQueryBuilder.Select("*")
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
}
histogramQueryCTE, histogramQueryCTEArgs := histogramCTEQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
histogramCTE := fmt.Sprintf("__histogram_cte AS (%s)", histogramQueryCTE)
return histogramCTE, histogramQueryCTEArgs, nil
}
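histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) interpolates the requested quantile from cumulative bucket counts. A rough Go re-statement of that interpolation, assuming cumulative counts and linear interpolation within the crossing bucket (an approximation, not the ClickHouse implementation):

package main

import (
	"fmt"
	"sort"
)

type bucket struct {
	le    float64 // upper bound of the bucket
	count float64 // cumulative count of observations <= le
}

func histogramQuantile(q float64, buckets []bucket) float64 {
	sort.Slice(buckets, func(i, j int) bool { return buckets[i].le < buckets[j].le })
	rank := q * buckets[len(buckets)-1].count
	lowerBound, lowerCount := 0.0, 0.0
	for _, b := range buckets {
		if b.count >= rank {
			if b.count == lowerCount { // empty bucket: nothing to interpolate
				return b.le
			}
			// linear interpolation inside the bucket that crosses the rank
			return lowerBound + (b.le-lowerBound)*(rank-lowerCount)/(b.count-lowerCount)
		}
		lowerBound, lowerCount = b.le, b.count
	}
	return buckets[len(buckets)-1].le
}

func main() {
	bs := []bucket{{le: 0.1, count: 40}, {le: 0.5, count: 90}, {le: 1, count: 100}}
	fmt.Println(histogramQuantile(0.95, bs)) // 0.75: p95 lands in the (0.5, 1] bucket
}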

View File

@@ -15,17 +15,16 @@ import (
)
func TestStatementBuilder(t *testing.T) {
type baseQuery struct {
name string
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
orderKey string
args []any
cte string
}
bases := []baseQuery{
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
expected qbtypes.Statement
expectedErr error
}{
{
name: "cumulative_rate_sum",
name: "test_cumulative_rate_sum",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -41,16 +40,24 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
},
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
},
{
name: "cumulative_rate_sum_with_mat_column",
name: "test_cumulative_rate_sum_with_mat_column",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -66,16 +73,24 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "materialized.key.name REGEXP 'cartservice' OR service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
},
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
},
{
name: "delta_rate_sum",
name: "test_delta_rate_sum",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -91,16 +106,24 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
},
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`)",
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
},
{
name: "histogram_percentile1",
name: "test_histogram_percentile1",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -116,38 +139,24 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
orderKey: "service.name",
args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
},
{
name: "histogram_percentile2",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "http_server_duration_bucket",
Type: metrictypes.HistogramType,
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
orderKey: "service.name",
args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
},
{
name: "gauge_avg_sum",
name: "test_gauge_avg_sum",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -163,81 +172,51 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "host.name = 'big-data-node-1'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "host.name"}},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "host.name",
},
},
},
},
orderKey: "host.name",
args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`)",
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,
},
{
name: "test_histogram_percentile2",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "http_server_duration_bucket",
Type: metrictypes.HistogramType,
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
},
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
},
}
type variant struct {
name string
limit int
hasOrder bool
}
variants := []variant{
{"with_limits", 10, false},
{"without_limits", 0, false},
{"with_order_by", 0, true},
{"with_order_by_and_limits", 10, true},
}
sumMetricsFinalSelects := map[string]string{
"with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
"with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
}
histogramMetricsFinalSelects := map[string]string{
"with_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"without_limits": " SELECT * FROM __histogram_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"with_order_by": " SELECT * FROM __histogram_cte ORDER BY `service.name` asc, ts ASC",
"with_order_by_and_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
}
// expectedFinalSelects maps "base/variant" to the final SELECT portion after the CTE.
// The full expected query is: base.cte + expectedFinalSelects[name]
expectedFinalSelects := map[string]string{
// cumulative_rate_sum
"cumulative_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
"cumulative_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
"cumulative_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
"cumulative_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
// cumulative_rate_sum_with_mat_column
"cumulative_rate_sum_with_mat_column/with_limits": sumMetricsFinalSelects["with_limits"],
"cumulative_rate_sum_with_mat_column/without_limits": sumMetricsFinalSelects["without_limits"],
"cumulative_rate_sum_with_mat_column/with_order_by": sumMetricsFinalSelects["with_order_by"],
"cumulative_rate_sum_with_mat_column/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
// delta_rate_sum
"delta_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
"delta_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
"delta_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
"delta_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
// histogram_percentile1
"histogram_percentile1/with_limits": histogramMetricsFinalSelects["with_limits"],
"histogram_percentile1/without_limits": histogramMetricsFinalSelects["without_limits"],
"histogram_percentile1/with_order_by": histogramMetricsFinalSelects["with_order_by"],
"histogram_percentile1/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],
// histogram_percentile2
"histogram_percentile2/with_limits": histogramMetricsFinalSelects["with_limits"],
"histogram_percentile2/without_limits": histogramMetricsFinalSelects["without_limits"],
"histogram_percentile2/with_order_by": histogramMetricsFinalSelects["with_order_by"],
"histogram_percentile2/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],
// gauge_avg_sum
"gauge_avg_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
"gauge_avg_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
"gauge_avg_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name` asc, ts ASC",
"gauge_avg_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY `host.name` asc LIMIT 10) ORDER BY `host.name` asc, ts ASC",
}
fm := NewFieldMapper()
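Per the removed comment above, each full expected query was simply base.cte concatenated with the variant's final SELECT; schematically (hypothetical stand-in values):

package main

import "fmt"

func main() {
	// stand-ins for base.cte and expectedFinalSelects[base.name+"/"+variant.name]
	cte := "WITH __spatial_aggregation_cte AS (/* ... */)"
	finalSelect := " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC"
	fmt.Println(cte + finalSelect) // the full expected query for one base/variant pair
}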
@@ -248,13 +227,15 @@ func TestStatementBuilder(t *testing.T) {
t.Fatalf("failed to load field keys: %v", err)
}
mockMetadataStore.KeysMap = keys
// NOTE: LoadFieldKeysFromJSON doesn't set Materialized field
// for keys, so we have to set it manually here for testing
if _, ok := mockMetadataStore.KeysMap["materialized.key.name"]; ok {
if len(mockMetadataStore.KeysMap["materialized.key.name"]) > 0 {
mockMetadataStore.KeysMap["materialized.key.name"][0].Materialized = true
}
}
fl, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
if err != nil {
t.Fatalf("failed to create flagger: %v", err)
}
@@ -264,30 +245,23 @@ func TestStatementBuilder(t *testing.T) {
mockMetadataStore,
fm,
cb,
fl,
flagger,
)
for _, b := range bases {
for _, v := range variants {
name := b.name + "/" + v.name
t.Run(name, func(t *testing.T) {
q := b.query
q.Limit = v.limit
if v.hasOrder {
q.Order = []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: b.orderKey}},
Direction: qbtypes.OrderDirectionAsc,
},
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
result, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, qbtypes.RequestTypeTimeSeries, q, nil)
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
require.NoError(t, err)
require.Equal(t, b.cte+expectedFinalSelects[name], result.Query)
require.Equal(t, b.args, result.Args)
})
}
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
}
}

View File

@@ -58,8 +58,6 @@ def build_builder_query(
step_interval: int = DEFAULT_STEP_INTERVAL,
group_by: Optional[List[str]] = None,
filter_expression: Optional[str] = None,
order_by: Optional[List[Dict]] = None,
limit: Optional[int] = None,
functions: Optional[List[Dict]] = None,
disabled: bool = False,
) -> Dict:
@@ -95,12 +93,6 @@ def build_builder_query(
if filter_expression:
spec["filter"] = {"expression": filter_expression}
if order_by:
spec["order"] = order_by
if limit is not None:
spec["limit"] = limit
if functions:
spec["functions"] = functions

View File

@@ -2,20 +2,16 @@
Look at the cumulative_counters_1h.jsonl file for the relevant data
"""
import logging
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List, Optional, Union
import pytest
from typing import Any, Callable, List
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -75,200 +71,16 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
def _assert_endpoint_rate_values(endpoint_values: dict) -> None:
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"metric_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None, # no explicit order: equivalent to ordering by sum(metric_name) desc
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"only_limit",
None, # no explicit order: equivalent to ordering by sum(metric_name) desc
3,
3,
["/products", "/health", "/checkout"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
[build_order_by("sum(test_rate_groupby_asc_metric_name)", "asc")],
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_asc_metric_name_lim3)", "asc")],
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
[build_order_by("sum(test_rate_groupby_desc_metric_name)", "desc")],
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_desc_metric_name_lim3)", "desc")],
3,
3,
["/products", "/health", "/checkout"],
),
],
)
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
metric_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_rate_groupby_{metric_suffix}"
metric_name = "test_rate_groupby"
metrics = Metrics.load_from_file(
CUMULATIVE_COUNTERS_FILE,
@@ -285,8 +97,6 @@ def test_rate_group_by_endpoint(
"sum",
temporality="cumulative",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -295,23 +105,10 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Should have 5 different endpoints
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
# endpoint -> values
endpoint_values = {}
@@ -320,6 +117,11 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# at no point rate should be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -327,4 +129,103 @@ def test_rate_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"
_assert_endpoint_rate_values(endpoint_values)
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
# verify the spike indices are consecutive
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"

View File

@@ -5,7 +5,7 @@ Look at the multi_temporality_counters_1h.jsonl file for the relevant data
import random
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
from typing import Callable, List
import pytest
@@ -14,7 +14,6 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -92,198 +91,6 @@ def test_with_steady_values_and_reset(
), f"{time_aggregation} should not be negative: {v['value']}"
def _assert_endpoint_group_values( # pylint: disable=too-many-arguments
endpoint_values: dict,
stable_health_value: float,
stable_products_value: float,
stable_checkout_value: float,
spike_checkout_value: float,
stable_orders_value: float,
spike_users_value: float,
time_aggregation: str,
) -> None:
# /health: 60 data points (t01-t60), steady +10/min
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(
1 for v in health_values if v["value"] == stable_health_value
)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
# all /health rates should be stable except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert (
v["value"] == stable_health_value
), f"Expected /health rate {stable_health_value}, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(
1 for v in products_values if v["value"] == stable_products_value
)
# most values should be stable, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
# check that non-stable values are due to gap averaging (should be lower)
gap_boundary_values = [
v["value"] for v in products_values if v["value"] != stable_products_value
]
for val in gap_boundary_values:
assert (
0 < val < stable_products_value
), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(
1 for v in checkout_values if v["value"] == stable_checkout_value
)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(
1 for v in checkout_values if v["value"] == spike_checkout_value
)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(
1 for v in orders_values if v["value"] == stable_orders_value
)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [
v["value"] for v in orders_values if v["value"] != stable_orders_value
]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
# non-zero values should be stable increment rate
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"order_suffix,order_by_spec,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"asc",
("endpoint", "asc"),
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
("endpoint", "asc"),
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
("endpoint", "desc"),
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
("endpoint", "desc"),
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
("sum_metric", "asc"),
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
("sum_metric", "asc"),
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
("sum_metric", "desc"),
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
("sum_metric", "desc"),
3,
3,
["/products", "/health", "/checkout"],
),
],
)
@pytest.mark.parametrize(
"time_aggregation, stable_health_value, stable_products_value, stable_checkout_value, spike_checkout_value, stable_orders_value, spike_users_value",
[
@@ -303,24 +110,11 @@ def test_group_by_endpoint(
spike_checkout_value: float,
stable_orders_value: float,
spike_users_value: float,
order_suffix: str,
order_by_spec: Optional[tuple],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_{time_aggregation}_groupby_{order_suffix}"
# Build order_by at runtime so metric name reflects actual time_aggregation
order_by = None
if order_by_spec is not None:
key, direction = order_by_spec
if key == "sum_metric":
key = f"sum({metric_name})"
order_by = [build_order_by(key, direction)]
metric_name = f"test_{time_aggregation}_groupby"
metrics = Metrics.load_from_file(
MULTI_TEMPORALITY_FILE,
@@ -336,8 +130,6 @@ def test_group_by_endpoint(
time_aggregation,
"sum",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -345,23 +137,10 @@ def test_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Should have 5 different endpoints
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
# endpoint -> values
endpoint_values = {}
@@ -370,6 +149,11 @@ def test_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# the rate should never be negative at any point
for endpoint, values in endpoint_values.items():
for v in values:
@@ -377,16 +161,117 @@ def test_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"
_assert_endpoint_group_values(
endpoint_values,
stable_health_value,
stable_products_value,
stable_checkout_value,
spike_checkout_value,
stable_orders_value,
spike_users_value,
time_aggregation,
# /health: 60 data points (t01-t60), steady +10/min
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(
1 for v in health_values if v["value"] == stable_health_value
)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
# all /health rates should be stable except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert (
v["value"] == stable_health_value
), f"Expected /health rate {stable_health_value}, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(
1 for v in products_values if v["value"] == stable_products_value
)
# most values should be stable, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
# check that non-stable values are due to gap averaging (should be lower)
gap_boundary_values = [
v["value"] for v in products_values if v["value"] != stable_products_value
]
for val in gap_boundary_values:
assert (
0 < val < stable_products_value
), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(
1 for v in checkout_values if v["value"] == stable_checkout_value
)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(
1 for v in checkout_values if v["value"] == spike_checkout_value
)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(
1 for v in orders_values if v["value"] == stable_orders_value
)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [
v["value"] for v in orders_values if v["value"] != stable_orders_value
]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(

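# Hypothetical helper (not in the diff) capturing the recurring
# "spike indices must form one consecutive run" check in these tests:
def _is_consecutive(indices):
    return all(b == a + 1 for a, b in zip(indices, indices[1:]))

assert _is_consecutive([40, 41, 42, 43, 44])  # the t40-t44 spike window
assert not _is_consecutive([40, 42, 43])      # a gap breaks the run
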
View File

@@ -2,12 +2,9 @@
Look at the histogram_data_1h.jsonl file for the relevant data
"""
import logging
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
logger = logging.getLogger(__name__)
from typing import Callable, List
import pytest
@@ -16,7 +13,6 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -24,7 +20,6 @@ from fixtures.querier import (
from fixtures.utils import get_testdata_file_path
FILE = get_testdata_file_path("histogram_data_1h.jsonl")
FILE_WITH_MANY_GROUPS = get_testdata_file_path("histogram_data_1h_many_groups.jsonl")
@pytest.mark.parametrize(
@@ -526,568 +521,4 @@ def test_histogram_percentile_for_delta_service(
assert len(result_values) == 60
assert result_values[0]["value"] == zeroth_value
assert result_values[1]["value"] == first_value
assert result_values[-1]["value"] == last_value
def _assert_series_endpoint_labels(
series: list,
expected_endpoints: Union[set, List[str]],
prefix: str,
) -> None:
labels = [s.get("labels", [{}])[0].get("value", "unknown") for s in series]
if isinstance(expected_endpoints, set):
assert (
set(labels) == expected_endpoints
), f"Expected {prefix} endpoints {expected_endpoints}, got {set(labels)}"
else:
assert labels == expected_endpoints, (
f"Expected {prefix} endpoints in order {expected_endpoints}, got {labels}"
)
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc_lim2",
[build_order_by("endpoint", "asc")],
2,
2,
["/checkout", "/health"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
3,
["/orders", "/health", "/checkout"],
),
(
"desc_lim2",
[build_order_by("endpoint", "desc")],
2,
2,
["/orders", "/health"],
),
(
"asc_metric_name",
[build_order_by("count(test_histogram_count_groupby_asc_metric_name)", "asc")],
None,
3,
["/health", "/orders", "/checkout"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"asc_metric_name_lim2",
[build_order_by("count(test_histogram_count_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["/health", "/orders"],
),
(
"desc_metric_name",
[build_order_by("count(test_histogram_count_groupby_desc_metric_name)", "desc")],
None,
3,
["/checkout", "/health", "/orders"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"desc_metric_name_lim2",
[build_order_by("count(test_histogram_count_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
["/checkout", "/health"],
),
],
)
def test_histogram_count_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_count_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_count = build_builder_query(
"A",
metric_name,
"increase",
"count",
comparisonSpaceAggregationParam={"threshold": 1000, "operator": "<="},
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_count])
assert response.status_code == HTTPStatus.OK
data = response.json()
count_all_series = get_all_series(data, "A")
assert (
len(count_all_series) == expected_count
), f"Expected {expected_count} series, got {len(count_all_series)}"
_assert_series_endpoint_labels(count_all_series, expected_endpoints, "count")
count_values = {}
for series in count_all_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
count_values[endpoint] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in count_values.items():
for v in values:
assert v["value"] >= 0, f"Count for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api): 59 points, increase starts at 11/min → 69/min
if "/health" in count_values:
vals = count_values["/health"]
assert vals[0]["value"] == 11, f"Expected /health count first=11, got {vals[0]['value']}"
assert vals[-1]["value"] == 69, f"Expected /health count last=69, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders" in count_values:
vals = count_values["/orders"]
assert vals[0]["value"] == 11, f"Expected /orders count first=11, got {vals[0]['value']}"
assert vals[-1]["value"] == 69, f"Expected /orders count last=69, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points, zeroth=12345 (raw delta), then 11/min → 69/min
if "/checkout" in count_values:
vals = count_values["/checkout"]
assert vals[0]["value"] == 12345, f"Expected /checkout count zeroth=12345, got {vals[0]['value']}"
assert vals[1]["value"] == 11, f"Expected /checkout count first=11, got {vals[1]['value']}"
assert vals[-1]["value"] == 69, f"Expected /checkout count last=69, got {vals[-1]['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
4,
[ "/checkout", "/health", "/orders", "/coupon"],
),
(
"only_limit",
None,
1,
1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
4,
[ "/checkout", "/coupon", "/health", "/orders"],
),
(
"asc_lim2",
[build_order_by("endpoint", "asc")],
2,
2,
["/checkout", "/coupon"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
4,
["/orders", "/health", "/coupon", "/checkout"],
),
(
"desc_lim2",
[build_order_by("endpoint", "desc")],
2,
2,
["/orders", "/health"],
),
(
"asc_metric_name",
[build_order_by("p75(test_histogram_p75_groupby_asc_metric_name)", "asc")],
None,
4,
["/coupon", "/orders", "/checkout", "/health"], ## health and checkout have the same size so they are then sorted endpoint as a tiebreaker
),
(
"asc_metric_name_lim2",
[build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["/coupon", "/orders"],
),
(
"asc_metric_name_lim3",
[build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim3)", "asc")],
3,
3,
["/coupon", "/orders", "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
),
(
"desc_metric_name",
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name)", "desc")],
None,
4,
[ "/checkout", "/health", "/orders", "/coupon"],
),
(
"desc_metric_name_lim2",
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
[ "/checkout", "/health"],
),
(
"desc_metric_name_lim2",
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
1,
1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
),
],
)
def test_histogram_percentile_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_p75_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE_WITH_MANY_GROUPS,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_p75 = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p75",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
assert response.status_code == HTTPStatus.OK
data = response.json()
p75_series = get_all_series(data, "A")
assert (
len(p75_series) == expected_count
), f"Expected {expected_count} p75 series, got {len(p75_series)}"
_assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")
p75_values = {}
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
p75_values[endpoint] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in p75_values.items():
for v in values:
assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api)
if "/health" in p75_values:
vals = p75_values["/health"]
assert vals[0]["value"] == 6000, f"Expected /health p75 first=8000, got {vals[0]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /health p75 last=991.304, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders" in p75_values:
vals = p75_values["/orders"]
assert vals[0]["value"] == 4500, f"Expected /orders p75 first=6400, got {vals[0]['value']}"
assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=991.304, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points
if "/checkout" in p75_values:
vals = p75_values["/checkout"]
assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=900, got {vals[0]['value']}"
assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6400, got {vals[1]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=991.304, got {vals[-1]['value']}"
# /coupon (delta, service=web): 60 points
if "/coupon" in p75_values:
vals = p75_values["/coupon"]
assert vals[0]["value"] == 1125, f"Expected /coupon p75 zeroth=900, got {vals[0]['value']}"
assert vals[1]["value"] == 1125, f"Expected /coupon p75 first=6400, got {vals[1]['value']}"
assert vals[-1]["value"] == 1125, f"Expected /coupon p75 last=991.304, got {vals[-1]['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints, expected_status_codes",
[
(
"no_order",
None,
None,
5,
[ "/checkout", "/health", "/orders", "/coupon", "/coupon"], ## coupon has 200 and 500 status codes so it will appear twice
[ "200", "200", "200", "200", "500"],
),
(
"only_limit",
None,
1,
1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
[ "200"]
),
(
"asc_endpoint",
[build_order_by("endpoint", "asc")],
None,
5,
[ "/checkout", "/coupon", "/coupon", "/health", "/orders"],
[ "200", "200", "500", "200", "200"],
),
(
"asc_endpoint_status_code",
[build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
None,
5,
[ "/checkout", "/coupon", "/coupon", "/health", "/orders"],
[ "200", "200", "500", "200", "200"],
),
(
"asc_status_code_endpoint",
[build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
None,
5,
[ "/checkout", "/coupon", "/health", "/orders", "/coupon"],
[ "200", "200", "200", "200", "500"],
),
(
"asc_endpoint_limit_2",
[build_order_by("endpoint", "asc")],
2,
2,
[ "/checkout", "/coupon"],
[ "200", "200"],
),
(
"asc_endpoint_status_code_limit_2",
[build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
2,
2,
[ "/checkout", "/coupon"],
[ "200", "200"],
),
(
"asc_status_code_endpoint_limit_4",
[build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
4,
4,
[ "/checkout", "/coupon", "/health", "/orders"],
[ "200", "200", "200", "200"],
),
(
"desc_endpoint",
[build_order_by("endpoint", "desc")],
None,
5,
["/orders", "/health", "/coupon", "/coupon", "/checkout"],
[ "200", "200", "200", "500", "200"],
),
(
"desc_endpoint_status_code",
[build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
None,
5,
["/orders", "/health", "/coupon", "/coupon", "/checkout"],
[ "200", "200", "500", "200", "200"],
),
(
"desc_status_code_endpoint",
[build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
None,
5,
["/coupon", "/orders", "/health", "/coupon", "/checkout"],
[ "500", "200", "200", "200", "200"],
),
(
"desc_endpoint_limit2",
[build_order_by("endpoint", "desc")],
3,
3,
["/orders", "/health", "/coupon"],
[ "200", "200", "200"],
),
(
"desc_endpoint_status_code_limit3",
[build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
3,
3,
["/orders", "/health", "/coupon"],
[ "200", "200", "500"],
),
(
"desc_status_code_endpoint_limit2",
[build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
2,
2,
["/coupon", "/orders"],
[ "500", "200"],
),
],
)
def test_histogram_percentile_group_by_endpoint_and_status_code(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
expected_status_codes: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_p75_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE_WITH_MANY_GROUPS,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_p75 = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p75",
group_by=["endpoint", "status_code"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
assert response.status_code == HTTPStatus.OK
data = response.json()
p75_series = get_all_series(data, "A")
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
status_code = series.get("labels", [{}])[1].get("value", "unknown")
values = series.get("values", [])
avg = sum(v["value"] for v in values) / len(values) if values else 0
logger.warning("endpoint=%s status_code=%s average_p75=%.3f", endpoint, status_code, avg)
assert (
len(p75_series) == expected_count
), f"Expected {expected_count} p75 series, got {len(p75_series)}"
_assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")
endpoints = [s.get("labels", [{}])[0].get("value", "unknown") for s in p75_series]
assert endpoints == expected_endpoints, (
f"Expected p75 endpoints in order {expected_endpoints}, got {endpoints}"
)
status_codes = [s.get("labels", [{}])[1].get("value", "unknown") for s in p75_series]
assert status_codes == expected_status_codes, (
f"Expected p75 endpoints in order {expected_status_codes}, got {status_codes}"
)
p75_values = {}
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
status_code = series.get("labels", [{}])[1].get("value", "unknown")
p75_values[endpoint+status_code] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in p75_values.items():
for v in values:
assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api)
if "/health200" in p75_values:
vals = p75_values["/health200"]
assert vals[0]["value"] == 6000, f"Expected /health p75 first=8000, got {vals[0]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /health p75 last=991.304, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders200" in p75_values:
vals = p75_values["/orders200"]
assert vals[0]["value"] == 4500, f"Expected /orders p75 first=6400, got {vals[0]['value']}"
assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=991.304, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points
if "/checkout200" in p75_values:
vals = p75_values["/checkout200"]
assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=900, got {vals[0]['value']}"
assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6400, got {vals[1]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=991.304, got {vals[-1]['value']}"
# /coupon (delta, service=web): 60 points
if "/coupon200" in p75_values:
vals = p75_values["/coupon200"]
assert vals[0]["value"] == 1250, f"Expected /coupon200 p75 zeroth=900, got {vals[0]['value']}"
assert vals[1]["value"] == 1250, f"Expected /coupon200 p75 first=6400, got {vals[1]['value']}"
assert vals[-1]["value"] == 1250, f"Expected /coupon200 p75 last=991.304, got {vals[-1]['value']}"
if "/coupon500" in p75_values:
vals = p75_values["/coupon500"]
assert vals[0]["value"] == 750, f"Expected /coupon500 p75 zeroth=900, got {vals[0]['value']}"
assert vals[1]["value"] == 750, f"Expected /coupon500 p75 first=6400, got {vals[1]['value']}"
assert vals[-1]["value"] == 750, f"Expected /coupon500 p75 last=991.304, got {vals[-1]['value']}"

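# Sketch (assuming standard cumulative-bucket interpolation, not the repo's
# implementation) of how a p75 such as the literals above can be derived
# from histogram buckets:
def _percentile_from_buckets(bounds, cumulative_counts, q=0.75):
    rank = q * cumulative_counts[-1]
    lower, prev = 0.0, 0
    for bound, count in zip(bounds, cumulative_counts):
        if count >= rank:
            return lower + (bound - lower) * (rank - prev) / (count - prev)
        lower, prev = bound, count
    return bounds[-1]

# e.g. the 75th percentile of 100 observations over bounds 250/500/1000/2000:
assert _percentile_from_buckets([250, 500, 1000, 2000], [10, 40, 90, 100]) == 850.0
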
View File

@@ -5,16 +5,13 @@ Look at the delta_counters_1h.jsonl file for the relevant data
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List, Optional, Union
import pytest
from typing import Any, Callable, List
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -72,61 +69,16 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
5,
{"/products", "/health", "/checkout", "/orders", "/users"},
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
],
)
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_rate_groupby_{order_suffix}"
metric_name = "test_rate_groupby"
metrics = Metrics.load_from_file(
DELTA_COUNTERS_FILE,
@@ -142,8 +94,6 @@ def test_rate_group_by_endpoint(
"rate",
"sum",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -152,23 +102,10 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Should have 5 different endpoints
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
# endpoint -> values
endpoint_values = {}
@@ -177,6 +114,11 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# the rate should never be negative at any point
for endpoint, values in endpoint_values.items():
for v in values:
@@ -186,95 +128,93 @@ def test_rate_group_by_endpoint(
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) == 60
), f"Expected 60 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health == 60
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
health_values = endpoint_values["/health"]
assert (
len(health_values) == 60
), f"Expected 60 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health == 60
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) == 51
), f"Expected 51 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
assert (
count_steady_products == 51
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
products_values = endpoint_values["/products"]
assert (
len(products_values) == 51
), f"Expected 51 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
assert (
count_steady_products == 51
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) == 61
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout == 56
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout == 5
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
assert (
len(checkout_values) == 61
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout == 56
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout == 5
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) == 60
), f"Expected 59 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders == 58
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) == 2
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) == 1
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) == 60
), f"Expected 59 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders == 58
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) == 2
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) == 1
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes (12 of them)
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) == 56
), f"Expected 56 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users == 44
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate == 12
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"
users_values = endpoint_values["/users"]
assert (
len(users_values) == 56
), f"Expected 56 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users == 44
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate == 12
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"

File diff suppressed because it is too large