Compare commits


30 Commits

Author SHA1 Message Date
grandwizard28
da21f498aa feat(audit): warn when AttachManyAuditDef resolves to zero events
When BodyJSONArray (or any multi-id extractor) returns an empty slice for
an AttachManyAuditDef, the middleware emits zero attach events. That can
mask configuration mistakes (wrong JSON path, body not pre-buffered, etc.).
Surface a warn-level log with the body size and a head excerpt so the
operator can spot the empty extraction quickly.
2026-05-11 17:45:38 +05:30
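
A minimal sketch of the zero-event guard this commit describes, assuming simplified types: if a multi-id extractor yields no ids, log the body size and a head excerpt. The logger wiring and field names are illustrative, not the actual SigNoz middleware code.

```go
package main

import (
	"log/slog"
	"os"
)

// bodyExcerpt returns the first n bytes of the buffered request body for the warn log.
func bodyExcerpt(body []byte, n int) string {
	if len(body) < n {
		n = len(body)
	}
	return string(body[:n])
}

// warnOnEmptyExtraction mirrors the commit's idea: when an AttachManyAuditDef
// extractor returns zero ids, surface the body so operators can spot a wrong
// JSON path or an un-buffered body quickly.
func warnOnEmptyExtraction(logger *slog.Logger, ids []string, body []byte) {
	if len(ids) > 0 {
		return
	}
	logger.Warn("attach-many audit def resolved to zero events",
		slog.Int("body_size", len(body)),
		slog.String("body_head", bodyExcerpt(body, 128)),
	)
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	warnOnEmptyExtraction(logger, nil, []byte(`{"alertIds": []}`))
}
```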
grandwizard28
948eab368c feat(apiserver): instrument remaining service-account routes + fix user-role attach
- Add audit instrumentation for the 5 SA routes missed in the original pass:
  POST /api/v1/service_accounts (Create), PUT /{id} (Update), PUT /me, DELETE
  /{id}, PUT /{id}/keys/{fid} (factor-api-key update).
- Add AttachAuditDef for the SA role attach/detach routes:
  POST /api/v1/service_accounts/{id}/roles (role id from body)
  DELETE /api/v1/service_accounts/{id}/roles/{rid} (role id from path)
- Fix user-role attach: PostableRole carries 'name', not 'id', so BodyJSONPath
  now reads name; the role identifier in the role-attach body is the role name.
- Switch session Create to ResourceMetaResourcesSession (plural) to match
  the Create-uses-plural convention applied elsewhere.
2026-05-11 16:35:41 +05:30
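
A tiny illustration of the name-vs-id fix above: the role-attach body is a PostableRole that carries "name", so an extractor reading "id" finds nothing. The payload shape is an assumption based on the commit message.

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	body := []byte(`{"name": "editor"}`)

	// Reading "id" finds nothing; this was the original bug.
	fmt.Println("id exists:", gjson.GetBytes(body, "id").Exists()) // false

	// Reading "name" yields the role identifier used for the attach event.
	fmt.Println("role name:", gjson.GetBytes(body, "name").String()) // editor
}
```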
grandwizard28
d84327e08e feat(audit): fan out attach events from request-body entity lists
Adds AttachManyAuditDef + BodyJSONArray extractor. The middleware now emits
one attach event per id returned by AttachedResourceIDs, in addition to the
primary create event. Applied to routes whose request body carries a list
of references to other entities:

- POST /api/v2/rules — preferredChannels → notification-channel.attached to rule
- POST /api/v1/downtime_schedules — alertIds → rule.attached to planned-maintenance
- POST /api/v1/route_policies — channels → notification-channel.attached to route-policy

Also fans out simple create+attach pairs for nested resources where the
child has its own identity:

- POST /api/v2/gateway/ingestion_keys/{keyId}/limits → ingestion-limit.created
  + ingestion-limit.attached to ingestion-key
- POST /api/v1/span_mapper_groups/{groupId}/span_mappers → span-mapper.created
  + span-mapper.attached to span-mapper-group

All create events now use ResourceMetaResources (plural) and pull the new
id from data.id via ResponseJSONPath.
2026-05-11 16:14:20 +05:30
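
A sketch of the fan-out this commit describes: a BodyJSONArray-style extractor pulls a list of ids from the request body and the middleware emits one attach event per id. The names follow the commit message, but the signatures below are assumptions.

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

// BodyJSONArray returns an extractor that reads a JSON array at path from the
// pre-buffered request body and returns each element as a string id.
func BodyJSONArray(path string) func(body []byte) []string {
	return func(body []byte) []string {
		var ids []string
		for _, v := range gjson.GetBytes(body, path).Array() {
			ids = append(ids, v.String())
		}
		return ids
	}
}

func main() {
	// POST /api/v1/downtime_schedules: alertIds -> rule.attached events.
	body := []byte(`{"name":"maintenance","alertIds":["r-1","r-2","r-3"]}`)
	extract := BodyJSONArray("alertIds")

	for _, id := range extract(body) {
		// One attach event per referenced rule, alongside the primary
		// planned-maintenance create event.
		fmt.Printf("emit rule.attached: rule=%s target=planned-maintenance\n", id)
	}
}
```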
grandwizard28
ac280d9b9c feat(audit): add ResponseJSONPath extractor and pre-buffer request body
ResourceIDExtractor now takes an ExtractorContext bundling the request, the
pre-buffered request body, and the captured response body. Adds:

- ResponseJSONPath(path) reads the captured response body via gjson, for
  Create routes where the new resource id is only known after the handler
  writes the response.
- BodyJSONPath now reads from the pre-buffered request bytes rather than
  re-reading req.Body. This fixes the latent bug where the handler would
  have already consumed the body before the extractor ran.

The middleware pre-buffers the request body when the matched route declares
any AuditDefs, restoring req.Body via NopCloser so the handler still sees
the full payload.

SA factor-api-key POST now pulls the new key's id from the response body
(data.id) for both the created and attached events.
2026-05-11 16:05:34 +05:30
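
The pre-buffer and response-capture mechanics described above, sketched under assumptions: the real middleware and ExtractorContext differ, and httptest.NewRecorder stands in for whatever response-capturing writer it actually uses. The point is the io.NopCloser restore and reading data.id from the captured response with gjson.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/tidwall/gjson"
)

func auditMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// Pre-buffer the request body so extractors can read it later
		// without consuming it from under the handler.
		reqBody, _ := io.ReadAll(req.Body)
		req.Body = io.NopCloser(bytes.NewReader(reqBody))

		// Capture the response so ResponseJSONPath-style extractors can read
		// ids that only exist after the handler writes the response.
		rec := httptest.NewRecorder()
		next.ServeHTTP(rec, req)

		respBody := rec.Body.Bytes()
		fmt.Println("audit: created id =", gjson.GetBytes(respBody, "data.id").String())

		// Replay the captured response to the real writer (headers omitted for brevity).
		w.WriteHeader(rec.Code)
		_, _ = w.Write(respBody)
	})
}

func main() {
	handler := auditMiddleware(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		body, _ := io.ReadAll(req.Body) // handler still sees the full payload
		fmt.Println("handler body:", string(body))
		_, _ = w.Write([]byte(`{"data":{"id":"key-123"}}`))
	}))

	req := httptest.NewRequest(http.MethodPost, "/api/v1/service_accounts/sa-1/keys",
		bytes.NewBufferString(`{"name":"ci"}`))
	handler.ServeHTTP(httptest.NewRecorder(), req)
}
```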
grandwizard28
cfeae11ade feat(audit): allow multiple AuditDefs per route, variadic WithAuditDef
Single endpoints can produce multiple semantically distinct audit events.
The first concrete case is POST /api/v1/service_accounts/{id}/keys, which
both creates a factor-api-key and attaches it to a service account.

- WithAuditDef becomes variadic; handler stores []AuditDef
- Handler interface returns AuditDefs() []AuditDef
- Middleware loops and emits one audit event per def
- SA factor-api-key POST now emits factor-api-key.created (collection-level)
  plus factor-api-key.attached to the parent service-account
2026-05-11 15:58:53 +05:30
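
A compressed sketch of the variadic option and the per-def emit loop; AuditDef and the option pattern here are simplified assumptions, not the real handler types.

```go
package main

import "fmt"

type AuditDef struct {
	Name string // e.g. "factor-api-key.created", "factor-api-key.attached"
}

type routeHandler struct {
	auditDefs []AuditDef
}

// WithAuditDef is variadic so a single route can declare multiple events.
func WithAuditDef(defs ...AuditDef) func(*routeHandler) {
	return func(h *routeHandler) {
		h.auditDefs = append(h.auditDefs, defs...)
	}
}

func (h *routeHandler) AuditDefs() []AuditDef { return h.auditDefs }

func main() {
	h := &routeHandler{}
	WithAuditDef(
		AuditDef{Name: "factor-api-key.created"},
		AuditDef{Name: "factor-api-key.attached"},
	)(h)

	// Middleware side: one audit event per declared def.
	for _, def := range h.AuditDefs() {
		fmt.Println("emit:", def.Name)
	}
}
```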
grandwizard28
c0af4e1135 feat(coretypes): rename KindMetric to KindMetricField
Mirror the logs-field / traces-field naming. The /api/v2/metrics/{metric_name}/metadata
audit subject is the metric field (the metric name registered for the org), not a
metric series instance.
2026-05-11 15:32:04 +05:30
grandwizard28
cd82a63369 feat(apiserver): instrument zeus profile and host routes with AuditDef 2026-05-11 14:18:28 +05:30
grandwizard28
ef293e1505 feat(apiserver): instrument span-mapper-group and span-mapper routes with AuditDef 2026-05-11 14:17:29 +05:30
grandwizard28
2eab9474c0 feat(apiserver): instrument role routes with AuditDef 2026-05-11 14:16:44 +05:30
grandwizard28
132bf8478a feat(apiserver): instrument promote-and-index paths route with AuditDef 2026-05-11 14:16:05 +05:30
grandwizard28
68ab3917ee feat(apiserver): instrument user-preference and org-preference routes with AuditDef 2026-05-11 14:15:44 +05:30
grandwizard28
52516eeec5 feat(apiserver): instrument organization update route with AuditDef 2026-05-11 14:15:02 +05:30
grandwizard28
93eb8c7419 feat(apiserver): instrument metric-metadata route with AuditDef 2026-05-11 14:14:18 +05:30
grandwizard28
47e5507490 feat(apiserver): instrument llm-pricing-rule routes with AuditDef 2026-05-11 14:13:55 +05:30
grandwizard28
f9e296de9f feat(apiserver): instrument ingestion-key and ingestion-limit routes with AuditDef 2026-05-11 14:13:15 +05:30
grandwizard28
4ff9323dc8 feat(apiserver): instrument public-dashboard routes with AuditDef 2026-05-11 14:12:28 +05:30
grandwizard28
f8570e9713 feat(apiserver): instrument cloud-integration routes with AuditDef 2026-05-11 14:11:48 +05:30
grandwizard28
ecc748c811 feat(apiserver): instrument auth-domain routes with AuditDef 2026-05-11 14:11:08 +05:30
grandwizard28
a5db2c1fca feat(apiserver): instrument notification-channel and route-policy routes with AuditDef 2026-05-11 14:10:36 +05:30
grandwizard28
8653dd3c5c feat(coretypes): register kinds for llm-pricing-rule, span mappers, zeus, metric 2026-05-11 14:09:42 +05:30
grandwizard28
aa3f45a2c7 refactor(audit): make AuditDef a sealed interface; use coretypes.Resource
AuditDef becomes an interface with two implementations: BasicAuditDef
for single-resource routes and AttachAuditDef for routes that attach
one resource to another. Resource ids are pulled via a ResourceIDExtractor
function — PathParam for mux vars, BodyJSONPath for body fields via gjson.

ResourceAttributes carries a coretypes.Resource (not just a Kind) so the
middleware can emit signoz.audit.resource.object via Resource.Object(orgID,
id) alongside resource.kind / resource.id. Attach events also emit
signoz.audit.target.{kind,id,object} for the target side of the attachment.

Routes:
- session, user, factor-password, factor-api-key, rule, planned-maintenance
  use BasicAuditDef with the matching ResourceMetaResourceX (or ResourceUser).
- POST /api/v2/users/{id}/roles and DELETE /api/v2/users/{id}/roles/{roleId}
  use AttachAuditDef — event name role.attached. POST extracts role id from
  body via gjson; DELETE pulls roleId from the path.
- Reset-password-token route now emits factor-password.updated (it previously
  used the now-removed reset-password-token kind).

Deletes KindUserRole and KindResetPasswordToken from the kind registry.

Affected unit tests deleted; the integration tests under
tests/integration/tests/auditor still reference the old event names and
will be rewritten in a follow-up.
2026-05-11 13:59:48 +05:30
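
A sketch of the sealed-interface shape described in this commit: AuditDef exposes an unexported method so only the package can implement it, with BasicAuditDef for single-resource routes and AttachAuditDef adding a target side. Field names and the extractor signature are assumptions.

```go
package main

import "fmt"

type ResourceIDExtractor func(pathVars map[string]string, body []byte) (string, error)

// AuditDef is sealed: the unexported method keeps implementations in-package.
type AuditDef interface {
	isAuditDef()
}

type BasicAuditDef struct {
	Name       string // e.g. "rule.created"
	ResourceID ResourceIDExtractor
}

func (BasicAuditDef) isAuditDef() {}

type AttachAuditDef struct {
	Name       string              // e.g. "role.attached"
	ResourceID ResourceIDExtractor // the attached resource (e.g. the role)
	TargetID   ResourceIDExtractor // the target resource (e.g. the user)
}

func (AttachAuditDef) isAuditDef() {}

func emit(def AuditDef) {
	switch d := def.(type) {
	case BasicAuditDef:
		fmt.Println("basic event:", d.Name)
	case AttachAuditDef:
		fmt.Println("attach event:", d.Name, "(also carries target kind/id/object)")
	}
}

func main() {
	emit(BasicAuditDef{Name: "rule.created"})
	emit(AttachAuditDef{Name: "role.attached"})
}
```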
grandwizard28
cb7b40cd7c refactor(audit): rename Action field to Verb on AuditDef and AuditAttributes
Drop the Action alias; the field has always held a coretypes.Verb value.
Rename the OTel attribute key signoz.audit.action to signoz.audit.verb
to match. ActionCategory is a distinct enum and stays as-is.
2026-05-11 13:06:36 +05:30
grandwizard28
9ebcf765d0 test(auditor): move shared helpers from suite conftest to fixtures/auditor.py
Mirrors the existing fixtures/audit.py / fixtures/auth.py / fixtures/signoz.py
layout — domain-named module under fixtures/ registered via pytest_plugins,
suite-local conftest.py kept thin (only the signoz fixture override that
configures the auditor container).
2026-05-11 12:31:25 +05:30
grandwizard28
e5ebd86ad6 test(auditor): bind-mount audit dir and atomic batch write
Mirror the sqlite/clickhouse pattern: the audit log file lives in a tmpfs
directory bind-mounted into the SigNoz container at the same absolute path,
so tests read it via a plain open() instead of docker exec. The directory
path is cached via reuse.wrap so the long-lived auditor container under
--reuse keeps using the matching mount across pytest runs.

The file provider's export now writes payload + newline in a single Write
call so a concurrent host reader cannot observe a torn JSON object.
2026-05-11 12:31:19 +05:30
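
The torn-write fix, sketched: marshal the event, append the newline, and hand the whole line to the file in a single Write call, so a concurrent reader tailing the bind-mounted file never sees a partial JSON object. The provider/export wiring is assumed; only the write pattern is the point.

```go
package main

import (
	"encoding/json"
	"os"
)

func exportEvent(f *os.File, event map[string]any) error {
	payload, err := json.Marshal(event)
	if err != nil {
		return err
	}
	// One Write for payload + newline; two separate writes could interleave
	// with another writer and expose a torn line to the host-side reader.
	line := append(payload, '\n')
	_, err = f.Write(line)
	return err
}

func main() {
	f, err := os.OpenFile("audit.jsonl", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	_ = exportEvent(f, map[string]any{"name": "rule.created", "resource_id": "r-1"})
}
```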
grandwizard28
72e1d1aace test(auditor): integration tests for file provider event emission 2026-05-11 12:27:42 +05:30
grandwizard28
b396b16a1e feat(apiserver): instrument rule and planned-maintenance routes with AuditDef 2026-05-11 12:25:08 +05:30
grandwizard28
7a90ca5e9e feat(apiserver): instrument service account API key routes with AuditDef 2026-05-11 12:24:07 +05:30
grandwizard28
46a7b1b1ff feat(apiserver): instrument user v2 P0 routes with AuditDef 2026-05-11 12:21:51 +05:30
grandwizard28
23ee24732d feat(apiserver): instrument session P0 routes with AuditDef 2026-05-11 12:20:09 +05:30
grandwizard28
ed1ab29307 feat(coretypes): register user-role and reset-password-token kinds 2026-05-11 12:19:28 +05:30
911 changed files with 26211 additions and 67006 deletions

View File

@@ -54,7 +54,6 @@ jobs:
JS_SRC: frontend
JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
JS_PKG_MANAGER: pnpm
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:

View File

@@ -87,7 +87,6 @@ jobs:
JS_INPUT_ARTIFACT_PATH: frontend/.env
JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
JS_PKG_MANAGER: pnpm
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:

View File

@@ -86,7 +86,6 @@ jobs:
JS_INPUT_ARTIFACT_PATH: frontend/.env
JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
JS_OUTPUT_ARTIFACT_PATH: frontend/build
JS_PKG_MANAGER: pnpm
DOCKER_BUILD: false
DOCKER_MANIFEST: false
go-build:

View File

@@ -21,19 +21,15 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: lts/*
- name: install-pnpm
uses: pnpm/action-setup@v6
with:
version: 10
- name: install
run: |
cd tests/e2e && pnpm install
cd tests/e2e && yarn install --frozen-lockfile
- name: fmt
run: |
cd tests/e2e && pnpm fmt:check
cd tests/e2e && yarn fmt:check
- name: lint
run: |
cd tests/e2e && pnpm lint
cd tests/e2e && yarn lint
test:
strategy:
fail-fast: false
@@ -58,19 +54,15 @@ jobs:
uses: actions/setup-node@v4
with:
node-version: lts/*
- name: install-pnpm
uses: pnpm/action-setup@v6
with:
version: 10
- name: python-install
run: |
cd tests && uv sync
- name: pnpm-install
- name: yarn-install
run: |
cd tests/e2e && pnpm install --frozen-lockfile
cd tests/e2e && yarn install --frozen-lockfile
- name: playwright-browsers
run: |
cd tests/e2e && pnpm playwright install --with-deps ${{ matrix.project }}
cd tests/e2e && yarn playwright install --with-deps ${{ matrix.project }}
- name: bring-up-stack
run: |
cd tests && \
@@ -81,7 +73,7 @@ jobs:
- name: playwright-test
run: |
cd tests/e2e && \
pnpm playwright test --project=${{ matrix.project }}
yarn playwright test --project=${{ matrix.project }}
- name: teardown-stack
if: always()
run: |

View File

@@ -77,10 +77,6 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v6
with:
version: 10
- name: docker-community
shell: bash
run: |

View File

@@ -25,10 +25,6 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v6
with:
version: 10
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact

View File

@@ -41,10 +41,6 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: setup-pnpm
uses: pnpm/action-setup@v6
with:
version: 10
- name: build-frontend
run: make js-build
- name: upload-frontend-artifact

View File

@@ -22,7 +22,6 @@ jobs:
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_PKG_MANAGER: pnpm
test:
if: |
github.event_name == 'merge_group' ||
@@ -33,7 +32,6 @@ jobs:
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_PKG_MANAGER: pnpm
fmt:
if: |
github.event_name == 'merge_group' ||
@@ -44,7 +42,6 @@ jobs:
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_PKG_MANAGER: pnpm
lint:
if: |
github.event_name == 'merge_group' ||
@@ -55,7 +52,6 @@ jobs:
with:
PRIMUS_REF: main
JS_SRC: frontend
JS_PKG_MANAGER: pnpm
languages:
if: |
github.event_name == 'merge_group' ||
@@ -80,13 +76,9 @@ jobs:
uses: actions/setup-node@v5
with:
node-version: "22"
- name: install-pnpm
uses: pnpm/action-setup@v6
with:
version: 10
- name: install-frontend
run: cd frontend && pnpm install
run: cd frontend && yarn install
- name: generate-api-clients
run: |
cd frontend && pnpm generate:api
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run pnpm generate:api in frontend/ locally and commit."; exit 1)
cd frontend && yarn generate:api
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)

View File

@@ -1,21 +1,19 @@
# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
# and commit this file to your remote git repository to share the goodness with others.
tasks:
- name: Run Docker Images
init: |
cd ./deploy/docker
sudo docker compose up -d
- name: Install pnpm
init: |
npm i -g pnpm
- name: Run Frontend
init: |
cd ./frontend
pnpm install
command: pnpm dev
yarn install
command:
yarn dev
ports:
- port: 8080

View File

@@ -154,7 +154,7 @@ $(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
.PHONY: js-build
js-build: ## Builds the js frontend
@echo ">> building js frontend"
@cd $(JS_BUILD_CONTEXT) && CI=1 pnpm install && pnpm build
@cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build
##############################################################
# docker commands

View File

@@ -66,9 +66,10 @@ func runGenerateAuthz(_ context.Context) error {
registry := coretypes.NewRegistry()
allowedResources := map[string]bool{
coretypes.NewResourceRef(coretypes.ResourceServiceAccount).String(): true,
coretypes.NewResourceRef(coretypes.ResourceRole).String(): true,
coretypes.NewResourceRef(coretypes.ResourceMetaResourceFactorAPIKey).String(): true,
coretypes.NewResourceRef(coretypes.ResourceServiceAccount).String(): true,
coretypes.NewResourceRef(coretypes.ResourceMetaResourcesServiceAccount).String(): true,
coretypes.NewResourceRef(coretypes.ResourceRole).String(): true,
coretypes.NewResourceRef(coretypes.ResourceMetaResourcesRole).String(): true,
}
allowedTypes := map[string]bool{}

View File

@@ -18,19 +18,16 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -112,9 +109,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
func(_ licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]] {
return signoz.NewAuditorProviderFactories()
},
func(_ context.Context, _ factory.ProviderSettings, _ flagger.Flagger, _ licensing.Licensing, _ telemetrystore.TelemetryStore, _ retention.Getter, _ organization.Getter, _ zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
return signoz.NewMeterReporterProviderFactories(), "noop"
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
return querier.NewHandler(ps, q, a)
},

View File

@@ -3,9 +3,8 @@ FROM node:22-bookworm AS build
WORKDIR /opt/
COPY ./frontend/ ./
ENV NODE_OPTIONS=--max-old-space-size=8192
RUN CI=1 npm i -g pnpm@10
RUN CI=1 pnpm install
RUN CI=1 pnpm build
RUN CI=1 yarn install
RUN CI=1 yarn build
FROM golang:1.25-bookworm

View File

@@ -1,55 +0,0 @@
package main
import (
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/telemetrytraces"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
)
var meterConfigs = []metercollector.Config{
{
Provider: metercollector.ProviderStatic,
Static: metercollector.StaticConfig{
Name: zeustypes.MeterPlatformActive,
Unit: zeustypes.MeterUnitCount,
Aggregation: zeustypes.MeterAggregationMax,
Value: 1,
},
},
{
Provider: metercollector.ProviderTelemetry,
Telemetry: metercollector.TelemetryConfig{
Name: zeustypes.MeterLogSize,
Unit: zeustypes.MeterUnitBytes,
Aggregation: zeustypes.MeterAggregationSum,
DBName: telemetrylogs.DBName,
TableName: telemetrylogs.LogsV2LocalTableName,
DefaultRetentionDays: retentiontypes.DefaultLogsRetentionDays,
},
},
{
Provider: metercollector.ProviderTelemetry,
Telemetry: metercollector.TelemetryConfig{
Name: zeustypes.MeterSpanSize,
Unit: zeustypes.MeterUnitBytes,
Aggregation: zeustypes.MeterAggregationSum,
DBName: telemetrytraces.DBName,
TableName: telemetrytraces.SpanIndexV3LocalTableName,
DefaultRetentionDays: retentiontypes.DefaultTracesRetentionDays,
},
},
{
Provider: metercollector.ProviderTelemetry,
Telemetry: metercollector.TelemetryConfig{
Name: zeustypes.MeterDatapointCount,
Unit: zeustypes.MeterUnitCount,
Aggregation: zeustypes.MeterAggregationSum,
DBName: telemetrymetrics.DBName,
TableName: telemetrymetrics.SamplesV4LocalTableName,
DefaultRetentionDays: retentiontypes.DefaultMetricsRetentionDays,
},
},
}

View File

@@ -18,9 +18,6 @@ import (
"github.com/SigNoz/signoz/ee/gateway/httpgateway"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/metercollector/staticmetercollector"
"github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector"
"github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration/implcloudprovider"
"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
@@ -39,17 +36,14 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
pkgflagger "github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
pkgcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -167,20 +161,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
}
return factories
},
func(ctx context.Context, providerSettings factory.ProviderSettings, flagger pkgflagger.Flagger, licensing licensing.Licensing, telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter, orgGetter organization.Getter, zeus zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
factories := signoz.NewMeterReporterProviderFactories()
collectorFactories := factory.MustNewNamedMap(
staticmetercollector.NewFactory(),
telemetrymetercollector.NewFactory(telemetryStore, retentionGetter),
)
if err := factories.Add(httpmeterreporter.NewFactory(collectorFactories, meterConfigs, flagger, licensing, orgGetter, zeus)); err != nil {
panic(err)
}
return factories, "http"
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
communityHandler := querier.NewHandler(ps, q, a)
return eequerier.NewHandler(ps, q, communityHandler)

View File

@@ -429,10 +429,3 @@ authz:
openfga:
# maximum tuples allowed per openfga write operation.
max_tuples_per_write: 300
##################### Meter Reporter #####################
meterreporter:
# The interval between collection ticks. Minimum 5m.
interval: 6h
# Whether to backfill sealed days from the license creation day.
backfill: true

View File

@@ -213,7 +213,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.144.4
image: signoz/signoz-otel-collector:v0.144.3
entrypoint:
- /bin/sh
command:
@@ -241,7 +241,7 @@ services:
replicas: 3
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.144.4
image: signoz/signoz-otel-collector:v0.144.3
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster

View File

@@ -139,7 +139,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.144.4
image: signoz/signoz-otel-collector:v0.144.3
entrypoint:
- /bin/sh
command:
@@ -167,7 +167,7 @@ services:
replicas: 3
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.144.4
image: signoz/signoz-otel-collector:v0.144.3
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster

View File

@@ -204,7 +204,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.4}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.3}
container_name: signoz-otel-collector
entrypoint:
- /bin/sh
@@ -229,7 +229,7 @@ services:
- "4318:4318" # OTLP HTTP receiver
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.4}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.3}
container_name: signoz-telemetrystore-migrator
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000

View File

@@ -132,7 +132,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.4}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.3}
container_name: signoz-otel-collector
entrypoint:
- /bin/sh
@@ -157,7 +157,7 @@ services:
- "4318:4318" # OTLP HTTP receiver
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.4}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.3}
container_name: signoz-telemetrystore-migrator
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000

File diff suppressed because it is too large

View File

@@ -13,12 +13,13 @@ Before diving in, make sure you have these tools installed:
- Download from [go.dev/dl](https://go.dev/dl/)
- Check [go.mod](../../go.mod#L3) for the minimum version
- **Node** - Powers our frontend
- Download from [nodejs.org](https://nodejs.org)
- Check [.nvmrc](../../frontend/.nvmrc) for the version
- **Pnpm** - Our frontend package manager
- Follow the [installation guide](https://pnpm.io/installation)
- **Yarn** - Our frontend package manager
- Follow the [installation guide](https://yarnpkg.com/getting-started/install)
- **Docker** - For running Clickhouse and Postgres locally
- Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
@@ -94,7 +95,7 @@ This command:
2. Install dependencies:
```bash
pnpm install
yarn install
```
3. Create a `.env` file in this directory:
@@ -104,10 +105,10 @@ This command:
4. Start the development server:
```bash
pnpm dev
yarn dev
```
> 💡 **Tip**: `pnpm dev` will automatically rebuild when you make changes to the code
> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
Now you're all set to start developing! Happy coding! 🎉

View File

@@ -304,7 +304,7 @@ import ec2Url from '@/assets/Logos/ec2.svg';
1. Add the logo SVG to `src/assets/Logos/` and add a top-level import in the config file (e.g., `import myServiceUrl from '@/assets/Logos/my-service.svg'`)
2. Add your data source object to the `onboardingConfigWithLinks` array, referencing the imported variable for `imgUrl`
3. Test the flow locally with `pnpm dev`
3. Test the flow locally with `yarn dev`
4. Validation:
- Navigate to the [onboarding page](http://localhost:3301/get-started-with-signoz-cloud) on your local machine
- Data source appears in the list

View File

@@ -87,7 +87,7 @@ func (provider *provider) BatchCheck(ctx context.Context, tupleReq map[string]*o
}
func (provider *provider) CheckTransactions(ctx context.Context, subject string, orgID valuer.UUID, transactions []*authtypes.Transaction) ([]*authtypes.TransactionWithAuthorization, error) {
tuples, correlations, err := authtypes.NewTuplesFromTransactionsWithCorrelations(transactions, subject, orgID)
tuples, err := authtypes.NewTuplesFromTransactions(transactions, subject, orgID)
if err != nil {
return nil, err
}
@@ -99,21 +99,10 @@ func (provider *provider) CheckTransactions(ctx context.Context, subject string,
results := make([]*authtypes.TransactionWithAuthorization, len(transactions))
for i, txn := range transactions {
txnID := txn.ID.StringValue()
authorized := batchResults[txnID].Authorized
if !authorized {
for _, correlationID := range correlations[txnID] {
if result, exists := batchResults[correlationID]; exists && result.Authorized {
authorized = true
break
}
}
}
result := batchResults[txn.ID.StringValue()]
results[i] = &authtypes.TransactionWithAuthorization{
Transaction: txn,
Authorized: authorized,
Authorized: result.Authorized,
}
}
return results, nil

View File

@@ -7,27 +7,17 @@ type organization
type user
relations
define create: [user, serviceaccount, role#assignee]
define list: [user, serviceaccount, role#assignee]
define read: [user, serviceaccount, role#assignee]
define update: [user, serviceaccount, role#assignee]
define delete: [user, serviceaccount, role#assignee]
define attach: [user, serviceaccount, role#assignee]
define detach: [user, serviceaccount, role#assignee]
type serviceaccount
relations
define create: [user, serviceaccount, role#assignee]
define list: [user, serviceaccount, role#assignee]
define read: [user, serviceaccount, role#assignee]
define update: [user, serviceaccount, role#assignee]
define delete: [user, serviceaccount, role#assignee]
define attach: [user, serviceaccount, role#assignee]
define detach: [user, serviceaccount, role#assignee]
type anonymous
@@ -35,28 +25,25 @@ type role
relations
define assignee: [user, serviceaccount, anonymous]
define create: [user, serviceaccount, role#assignee]
define list: [user, serviceaccount, role#assignee]
define read: [user, serviceaccount, role#assignee]
define update: [user, serviceaccount, role#assignee]
define delete: [user, serviceaccount, role#assignee]
define attach: [user, serviceaccount, role#assignee]
define detach: [user, serviceaccount, role#assignee]
type metaresource
type metaresources
relations
define create: [user, serviceaccount, role#assignee]
define list: [user, serviceaccount, role#assignee]
define read: [user, serviceaccount, anonymous, role#assignee]
define update: [user, serviceaccount, role#assignee]
define delete: [user, serviceaccount, role#assignee]
type metaresource
relations
define read: [user, serviceaccount, anonymous, role#assignee]
define update: [user, serviceaccount, role#assignee]
define delete: [user, serviceaccount, role#assignee]
define block: [user, serviceaccount, role#assignee]
define block: [user, serviceaccount, role#assignee]
type telemetryresource
relations
define read: [user, serviceaccount, role#assignee]
define read: [user, serviceaccount, role#assignee]

View File

@@ -1,61 +0,0 @@
// Package staticmetercollector emits a fixed-value meter reading per org per window.
package staticmetercollector
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var _ metercollector.MeterCollector = (*Provider)(nil)
type Provider struct {
settings factory.ScopedProviderSettings
config metercollector.StaticConfig
}
func NewFactory() factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderStatic), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
return newProvider(providerSettings, config.Static), nil
},
)
}
func newProvider(providerSettings factory.ProviderSettings, config metercollector.StaticConfig) *Provider {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/staticmetercollector")
return &Provider{
settings: settings,
config: config,
}
}
func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
return provider.config.Aggregation
}
func (provider *Provider) Origin(_ context.Context, _ valuer.UUID, license *licensetypes.License, _ time.Time) (time.Time, error) {
if license == nil || license.CreatedAt.IsZero() {
return time.Time{}, nil
}
createdAt := license.CreatedAt.UTC()
return time.Date(createdAt.Year(), createdAt.Month(), createdAt.Day(), 0, 0, 0, 0, time.UTC), nil
}
func (provider *Provider) Collect(_ context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error) {
if license == nil || license.Key == "" {
return nil, nil
}
return []zeustypes.Meter{
zeustypes.NewMeter(provider.config.Name, provider.config.Value, provider.config.Unit, provider.config.Aggregation, window, zeustypes.NewDimensions(zeustypes.OrganizationID.String(orgID.StringValue()))),
}, nil
}

View File

@@ -1,247 +0,0 @@
// Package telemetrymetercollector collects telemetry meters (logs, traces, metrics)
// by retention. One Provider materializes per TelemetryConfig.
package telemetrymetercollector
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/huandu/go-sqlbuilder"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/telemetrymeter"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
labelKeyPattern = regexp.MustCompile(`^[A-Za-z0-9_.\-]+$`)
labelValuePattern = regexp.MustCompile(`^[A-Za-z0-9_.\-:]+$`)
)
var _ metercollector.MeterCollector = (*Provider)(nil)
type Provider struct {
settings factory.ScopedProviderSettings
config metercollector.TelemetryConfig
telemetryStore telemetrystore.TelemetryStore
retentionGetter retention.Getter
}
func NewFactory(telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter) factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderTelemetry), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
return newProvider(providerSettings, config.Telemetry, telemetryStore, retentionGetter), nil
},
)
}
func newProvider(
providerSettings factory.ProviderSettings,
config metercollector.TelemetryConfig,
telemetryStore telemetrystore.TelemetryStore,
retentionGetter retention.Getter,
) *Provider {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector")
return &Provider{
settings: settings,
config: config,
telemetryStore: telemetryStore,
retentionGetter: retentionGetter,
}
}
func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
return provider.config.Aggregation
}
func (provider *Provider) Origin(ctx context.Context, _ valuer.UUID, _ *licensetypes.License, todayStart time.Time) (time.Time, error) {
query, args := buildOriginQuery(provider.config.Name.String())
var minMs int64
if err := provider.telemetryStore.ClickhouseDB().QueryRow(ctx, query, args...).Scan(&minMs); err != nil {
return time.Time{}, err
}
if minMs == 0 {
return todayStart, nil
}
minDay := time.UnixMilli(minMs).UTC()
return time.Date(minDay.Year(), minDay.Month(), minDay.Day(), 0, 0, 0, 0, time.UTC), nil
}
func (provider *Provider) Collect(
ctx context.Context,
orgID valuer.UUID,
_ *licensetypes.License,
window zeustypes.MeterWindow,
) ([]zeustypes.Meter, error) {
meterName := provider.config.Name.String()
segments, err := provider.retentionGetter.GetRetentionPolicySegments(
ctx,
orgID,
provider.config.DBName,
provider.config.TableName,
provider.config.DefaultRetentionDays,
window.StartUnixMilli,
window.EndUnixMilli,
)
if err != nil {
return nil, err
}
valuesByRetentionDays := make(map[int]int64)
for _, segment := range segments {
query, args, err := buildQuery(meterName, segment)
if err != nil {
return nil, err
}
rows, err := provider.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
if err != nil {
return nil, err
}
if err := func() error {
defer rows.Close()
for rows.Next() {
var retentionDays int32
var value int64
if err := rows.Scan(&retentionDays, &value); err != nil {
return err
}
valuesByRetentionDays[int(retentionDays)] += value
}
if err := rows.Err(); err != nil {
return err
}
return nil
}(); err != nil {
return nil, err
}
}
meters := make([]zeustypes.Meter, 0, len(valuesByRetentionDays))
for retentionDays, value := range valuesByRetentionDays {
meters = append(meters, zeustypes.NewMeter(provider.config.Name, value, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, retentionDays)))
}
// Empty windows still emit a sentinel so checkpoints can advance.
if len(meters) == 0 && len(segments) > 0 {
meters = append(meters, zeustypes.NewMeter(provider.config.Name, 0, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, segments[len(segments)-1].DefaultDays)))
}
return meters, nil
}
func buildOriginQuery(meterName string) (string, []any) {
sb := sqlbuilder.NewSelectBuilder()
sb.Select("toInt64(ifNull(min(unix_milli), 0))")
sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
sb.Where(sb.Equal("metric_name", meterName))
return sb.BuildWithFlavor(sqlbuilder.ClickHouse)
}
func buildQuery(meterName string, segment *retentiontypes.RetentionPolicySegment) (string, []any, error) {
retentionExpr, err := buildRetentionMultiIfSQL(segment.Rules, segment.DefaultDays)
if err != nil {
return "", nil, err
}
selects := []string{
retentionExpr + " AS retention_days",
"toInt64(ifNull(sum(value), 0)) AS value",
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select(selects...)
sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
sb.Where(
sb.Equal("metric_name", meterName),
sb.GTE("unix_milli", segment.StartMs),
sb.LT("unix_milli", segment.EndMs),
)
sb.GroupBy("retention_days")
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return query, args, nil
}
func buildRetentionMultiIfSQL(rules []retentiontypes.CustomRetentionRule, defaultDays int) (string, error) {
if defaultDays <= 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "non-positive default retention %d", defaultDays)
}
if len(rules) == 0 {
return "toInt32(" + strconv.Itoa(defaultDays) + ")", nil
}
arms := make([]string, 0, 2*len(rules)+1)
for ruleIndex, rule := range rules {
if rule.TTLDays <= 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has non-positive ttl_days %d", ruleIndex, rule.TTLDays)
}
conditionExpr, err := buildRuleConditionSQL(ruleIndex, rule)
if err != nil {
return "", err
}
arms = append(arms, conditionExpr)
arms = append(arms, strconv.Itoa(rule.TTLDays))
}
arms = append(arms, strconv.Itoa(defaultDays))
return "toInt32(multiIf(" + strings.Join(arms, ", ") + "))", nil
}
func buildRuleConditionSQL(ruleIndex int, rule retentiontypes.CustomRetentionRule) (string, error) {
if len(rule.Filters) == 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has no filters", ruleIndex)
}
filterExprs := make([]string, 0, len(rule.Filters))
for filterIndex, filter := range rule.Filters {
if !labelKeyPattern.MatchString(filter.Key) {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has invalid key %q", ruleIndex, filterIndex, filter.Key)
}
if len(filter.Values) == 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has no values", ruleIndex, filterIndex)
}
quoted := make([]string, len(filter.Values))
for valueIndex, value := range filter.Values {
if !labelValuePattern.MatchString(value) {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d value %d is invalid %q", ruleIndex, filterIndex, valueIndex, value)
}
quoted[valueIndex] = "'" + value + "'"
}
filterExprs = append(filterExprs, fmt.Sprintf("JSONExtractString(labels, '%s') IN (%s)", filter.Key, strings.Join(quoted, ", ")))
}
return strings.Join(filterExprs, " AND "), nil
}
func buildDimensions(orgID valuer.UUID, retentionDays int) map[string]string {
retentionDurationSeconds := int64(retentionDays) * 24 * 60 * 60 // seconds
return zeustypes.NewDimensions(
zeustypes.OrganizationID.String(orgID.StringValue()),
zeustypes.RetentionDuration.String(strconv.FormatInt(retentionDurationSeconds, 10)),
)
}

View File

@@ -1,318 +0,0 @@
package httpmeterreporter
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
var _ factory.ServiceWithHealthy = (*Provider)(nil)
type Provider struct {
settings factory.ScopedProviderSettings
config meterreporter.Config
collectorsByName map[zeustypes.MeterName]metercollector.MeterCollector
flagger flagger.Flagger
licensing licensing.Licensing
orgGetter organization.Getter
zeus zeus.Zeus
healthyC chan struct{}
stopC chan struct{}
metrics *reporterMetrics
}
func NewFactory(collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]], collectorConfigs []metercollector.Config, flagger flagger.Flagger, licensing licensing.Licensing, orgGetter organization.Getter, zeus zeus.Zeus) factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config meterreporter.Config) (meterreporter.Reporter, error) {
return newProvider(ctx, providerSettings, config, collectorFactories, collectorConfigs, flagger, licensing, orgGetter, zeus)
},
)
}
func newProvider(
ctx context.Context,
providerSettings factory.ProviderSettings,
config meterreporter.Config,
collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]],
collectorConfigs []metercollector.Config,
flagger flagger.Flagger,
licensing licensing.Licensing,
orgGetter organization.Getter,
zeus zeus.Zeus,
) (*Provider, error) {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter")
collectorsByName := map[zeustypes.MeterName]metercollector.MeterCollector{}
for _, collectorConfig := range collectorConfigs {
collector, err := factory.NewProviderFromNamedMap(ctx, providerSettings, collectorConfig, collectorFactories, collectorConfig.Provider)
if err != nil {
return nil, err
}
if _, exists := collectorsByName[collector.Name()]; exists {
return nil, errors.Newf(errors.TypeAlreadyExists, errors.CodeAlreadyExists, "duplicate meter collector %q", collector.Name())
}
collectorsByName[collector.Name()] = collector
}
metrics, err := newReporterMetrics(settings.Meter())
if err != nil {
return nil, err
}
return &Provider{
settings: settings,
config: config,
collectorsByName: collectorsByName,
flagger: flagger,
licensing: licensing,
orgGetter: orgGetter,
zeus: zeus,
healthyC: make(chan struct{}),
stopC: make(chan struct{}),
metrics: metrics,
}, nil
}
func (provider *Provider) Start(ctx context.Context) error {
close(provider.healthyC)
provider.collect(ctx)
ticker := time.NewTicker(provider.config.Interval)
defer ticker.Stop()
for {
select {
case <-provider.stopC:
return nil
case <-ticker.C:
provider.collect(ctx)
}
}
}
func (provider *Provider) collect(ctx context.Context) {
ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.Collect", trace.WithAttributes(attribute.String("meterreporter.provider", "http")))
defer span.End()
orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
if err != nil {
span.RecordError(err)
provider.settings.Logger().ErrorContext(ctx, "failed to get orgs data", errors.Attr(err))
return
}
for _, org := range orgs {
evalCtx := featuretypes.NewFlaggerEvaluationContext(org.ID)
if !provider.flagger.BooleanOrEmpty(ctx, flagger.FeatureUseMeterReporter, evalCtx) {
provider.settings.Logger().DebugContext(ctx, "meter reporter disabled for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
continue
}
license, err := provider.licensing.GetActive(ctx, org.ID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
provider.settings.Logger().DebugContext(ctx, "no active license found for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
continue
}
span.RecordError(err)
provider.settings.Logger().ErrorContext(ctx, "failed to fetch active license for org", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
return
}
if err := provider.collectOrg(ctx, org, license); err != nil {
span.RecordError(err)
provider.settings.Logger().ErrorContext(ctx, "failed to collect meters", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
}
}
}
func (provider *Provider) Stop(ctx context.Context) error {
close(provider.stopC)
return nil
}
func (provider *Provider) Healthy() <-chan struct{} {
return provider.healthyC
}
func (provider *Provider) collectOrg(ctx context.Context, org *types.Organization, license *licensetypes.License) error {
now := time.Now().UTC()
// Use one timestamp so a tick cannot straddle midnight.
todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)
if provider.config.Backfill {
checkpointsByMeter, err := provider.checkpoints(ctx, license.Key)
if err != nil {
return err
}
nextByCollector := provider.nextDays(license, todayStart, checkpointsByMeter)
start, end, ok := backfillRange(nextByCollector, todayStart)
if ok {
for day := start; !day.After(end); day = day.AddDate(0, 0, 1) {
eligible := eligibleCollectors(provider.collectorsByName, nextByCollector, day)
if len(eligible) == 0 {
continue
}
window, err := zeustypes.NewMeterWindow(day.UnixMilli(), day.AddDate(0, 0, 1).UnixMilli(), true)
if err != nil {
return err
}
if err := provider.report(ctx, org.ID, license, window, eligible); err != nil {
provider.settings.Logger().WarnContext(ctx, "failed to backfill for day", errors.Attr(err), slog.String("date", day.Format("2006-01-02")))
return err
}
}
}
}
// Today's partial window: every collector is always eligible (next <= today).
if now.UnixMilli() > todayStart.UnixMilli() {
todayWindow, err := zeustypes.NewMeterWindow(todayStart.UnixMilli(), now.UnixMilli(), false)
if err != nil {
return err
}
return provider.report(ctx, org.ID, license, todayWindow, provider.collectorsByName)
}
return nil
}
func (provider *Provider) checkpoints(ctx context.Context, licenseKey string) (map[string]time.Time, error) {
list, err := provider.zeus.ListMeterCheckpoints(ctx, licenseKey)
if err != nil {
provider.metrics.checkpoints.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
return nil, err
}
provider.metrics.checkpoints.Add(ctx, 1)
checkpointsByMeter := make(map[string]time.Time, len(list))
for _, checkpoint := range list {
checkpointsByMeter[checkpoint.Name] = checkpoint.StartDate.UTC()
}
return checkpointsByMeter, nil
}
func (provider *Provider) nextDays(license *licensetypes.License, todayStart time.Time, checkpointsByMeter map[string]time.Time) map[zeustypes.MeterName]time.Time {
nextByCollector := make(map[zeustypes.MeterName]time.Time, len(provider.collectorsByName))
licenseCreatedAt := license.CreatedAt.UTC()
licenseCreatedAtDay := time.Date(licenseCreatedAt.Year(), licenseCreatedAt.Month(), licenseCreatedAt.Day(), 0, 0, 0, 0, time.UTC)
for _, collector := range provider.collectorsByName {
checkpoint, hasCheckpoint := checkpointsByMeter[collector.Name().String()]
nextByCollector[collector.Name()] = nextReportableDay(licenseCreatedAtDay, todayStart, checkpoint, hasCheckpoint)
}
return nextByCollector
}
func nextReportableDay(licenseCreatedAtDay time.Time, todayStart time.Time, checkpoint time.Time, hasCheckpoint bool) time.Time {
next := licenseCreatedAtDay
if next.IsZero() {
next = todayStart
}
if hasCheckpoint {
checkpointNext := checkpoint.AddDate(0, 0, 1)
if checkpointNext.After(next) {
next = checkpointNext
}
}
return next
}
func (provider *Provider) report(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow, collectors map[zeustypes.MeterName]metercollector.MeterCollector) error {
date := time.UnixMilli(window.StartUnixMilli).UTC().Format("2006-01-02")
meters := make([]zeustypes.Meter, 0, len(collectors))
for _, collector := range collectors {
meterAttr := attribute.String("signoz.meter.name", collector.Name().String())
collectedReadings, err := collector.Collect(ctx, orgID, license, window)
if err != nil {
provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr, errors.TypeAttr(err)))
continue
}
provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr))
meters = append(meters, collectedReadings...)
}
if len(meters) == 0 {
return nil
}
idempotencyKey := fmt.Sprintf("meterreporter:%s", date)
body, err := json.Marshal(meters)
if err != nil {
provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
return err
}
if err := provider.zeus.PutMetersV3(ctx, license.Key, idempotencyKey, body); err != nil {
provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
return err
}
provider.metrics.reports.Add(ctx, 1)
provider.metrics.meters.Add(ctx, int64(len(meters)))
return nil
}
// backfillRange returns the inclusive sealed-day range ending at yesterday.
func backfillRange(nextByCollector map[zeustypes.MeterName]time.Time, todayStart time.Time) (start, end time.Time, ok bool) {
yesterday := todayStart.AddDate(0, 0, -1)
for _, next := range nextByCollector {
if !next.Before(todayStart) {
continue
}
if start.IsZero() || next.Before(start) {
start = next
}
}
if start.IsZero() || start.After(yesterday) {
return time.Time{}, time.Time{}, false
}
return start, yesterday, true
}
func eligibleCollectors(collectors map[zeustypes.MeterName]metercollector.MeterCollector, nextByCollector map[zeustypes.MeterName]time.Time, day time.Time) map[zeustypes.MeterName]metercollector.MeterCollector {
eligible := make(map[zeustypes.MeterName]metercollector.MeterCollector, len(collectors))
for name, collector := range collectors {
if !nextByCollector[name].After(day) {
eligible[name] = collector
}
}
return eligible
}

View File

@@ -1,48 +0,0 @@
package httpmeterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"go.opentelemetry.io/otel/metric"
)
type reporterMetrics struct {
checkpoints metric.Int64Counter
reports metric.Int64Counter
collections metric.Int64Counter
meters metric.Int64Counter
}
func newReporterMetrics(meter metric.Meter) (*reporterMetrics, error) {
var errs error
checkpoints, err := meter.Int64Counter("signoz.meterreporter.checkpoints", metric.WithDescription("Zeus meter checkpoint fetches."), metric.WithUnit("{checkpoint}"))
if err != nil {
errs = errors.Join(errs, err)
}
reports, err := meter.Int64Counter("signoz.meterreporter.reports", metric.WithDescription("Meter reports shipped to Zeus."), metric.WithUnit("{report}"))
if err != nil {
errs = errors.Join(errs, err)
}
collections, err := meter.Int64Counter("signoz.meterreporter.collections", metric.WithDescription("Per-meter collect calls."), metric.WithUnit("{collection}"))
if err != nil {
errs = errors.Join(errs, err)
}
meters, err := meter.Int64Counter("signoz.meterreporter.meters", metric.WithDescription("Meter readings shipped to Zeus."), metric.WithUnit("{meter}"))
if err != nil {
errs = errors.Join(errs, err)
}
if errs != nil {
return nil, errs
}
return &reporterMetrics{
checkpoints: checkpoints,
reports: reports,
collections: collections,
meters: meters,
}, nil
}

View File

@@ -150,72 +150,6 @@ func (provider *Provider) PutMetersV2(ctx context.Context, key string, data []by
return err
}
func (provider *Provider) PutMetersV3(ctx context.Context, key string, idempotencyKey string, data []byte) error {
headers := http.Header{}
if idempotencyKey != "" {
headers.Set("X-Idempotency-Key", idempotencyKey)
}
_, err := provider.doWithHeaders(
ctx,
provider.config.URL.JoinPath("/v2/meters"),
http.MethodPost,
key,
data,
headers,
)
return err
}
func (provider *Provider) ListMeterCheckpoints(ctx context.Context, key string) ([]zeustypes.MeterCheckpoint, error) {
response, err := provider.do(
ctx,
provider.config.URL.JoinPath("/v2/meters/checkpoints"),
http.MethodGet,
key,
nil,
)
if err != nil {
return nil, err
}
checkpointValues := gjson.GetBytes(response, "data")
if !checkpointValues.Exists() || checkpointValues.Type == gjson.Null {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints are required")
}
if !checkpointValues.IsArray() {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints must be an array")
}
checkpointResults := checkpointValues.Array()
checkpoints := make([]zeustypes.MeterCheckpoint, 0, len(checkpointResults))
for _, checkpointValue := range checkpointResults {
name := checkpointValue.Get("name").String()
if name == "" {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint name is required")
}
startDateString := checkpointValue.Get("start_date").String()
if startDateString == "" {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint start_date is required for %q", name)
}
startDate, err := time.Parse("2006-01-02", startDateString)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, zeus.ErrCodeResponseMalformed, "parse meter checkpoint start_date %q for %q", startDateString, name)
}
checkpoints = append(checkpoints, zeustypes.MeterCheckpoint{
Name: name,
StartDate: startDate,
})
}
return checkpoints, nil
}
func (provider *Provider) PutProfile(ctx context.Context, key string, profile *zeustypes.PostableProfile) error {
body, err := json.Marshal(profile)
if err != nil {
@@ -251,21 +185,12 @@ func (provider *Provider) PutHost(ctx context.Context, key string, host *zeustyp
}
func (provider *Provider) do(ctx context.Context, url *url.URL, method string, key string, requestBody []byte) ([]byte, error) {
return provider.doWithHeaders(ctx, url, method, key, requestBody, nil)
}
func (provider *Provider) doWithHeaders(ctx context.Context, url *url.URL, method string, key string, requestBody []byte, extraHeaders http.Header) ([]byte, error) {
request, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(requestBody))
if err != nil {
return nil, err
}
request.Header.Set("X-Signoz-Cloud-Api-Key", key)
request.Header.Set("Content-Type", "application/json")
for k, vs := range extraHeaders {
for _, v := range vs {
request.Header.Add(k, v)
}
}
response, err := provider.httpClient.Do(request)
if err != nil {

View File

@@ -1,19 +0,0 @@
---
description: Prefer SigNoz UI and icons across frontend code
globs: **/*.{ts,tsx,js,jsx}
alwaysApply: true
---
# UI Components and Icons Source of Truth
For all frontend implementation work in this repository:
- Always use UI primitives/components from `@signozhq/ui`.
- Always use icons from `@signozhq/icons`.
- Do not introduce new usage of icon libraries directly (for example `lucide-react`) in app code.
- Do not mix multiple component systems for the same UI surface when an equivalent exists in `@signozhq/ui`.
## Migration guidance
- If touching a file that already uses non-`@signozhq/icons` icons, prefer migrating that file to `@signozhq/icons` as part of the same change when practical.
- If a required component or icon is missing from SigNoz packages, call this out explicitly in the PR/summary before introducing alternatives.

View File

@@ -1,7 +1,7 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && pnpm run commitlint --edit $1
cd frontend && yarn run commitlint --edit $1
branch="$(git rev-parse --abbrev-ref HEAD)"

View File

@@ -1,4 +1,4 @@
#!/bin/sh
. "$(dirname "$0")/_/husky.sh"
cd frontend && pnpm lint-staged
cd frontend && yarn lint-staged

View File

@@ -1,5 +1 @@
registry = 'https://registry.npmjs.org/'
engine-strict=true
public-hoist-pattern[]=@commitlint*
public-hoist-pattern[]=commitlint
registry = 'https://registry.npmjs.org/'

View File

@@ -291,8 +291,6 @@
// Prevents window.open(path), window.location.origin + path, window.location.href = path
"signoz/no-antd-components": "error",
// Prevents the usage of specific antd components in favor of our lib
"signoz/no-signozhq-ui-barrel": "error",
// Forces subpath imports (@signozhq/ui/<component>) instead of the eagerly-loaded barrel
"no-restricted-globals": [
"error",
{
@@ -413,7 +411,7 @@
"jsx-a11y/media-has-caption": "warn",
"jsx-a11y/mouse-events-have-key-events": "warn",
"jsx-a11y/no-access-key": "error",
"jsx-a11y/no-autofocus": "off",
"jsx-a11y/no-autofocus": "warn",
"jsx-a11y/no-distracting-elements": "error",
"jsx-a11y/no-redundant-roles": "warn",
"jsx-a11y/role-has-required-aria-props": "warn",
@@ -497,8 +495,7 @@
"overrides": [
{
"files": [
"src/api/generated/**/*.ts",
"src/api/ai-assistant/**/*.ts"
"src/api/generated/**/*.ts"
],
"rules": {
"@typescript-eslint/explicit-function-return-type": "off",

View File

@@ -28,8 +28,8 @@ Follow the steps below
1. ```git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend```
1. change baseURL to ```<test environment URL>``` in file ```src/constants/env.ts```
1. ```pnpm install```
1. ```pnpm dev```
1. ```yarn install```
1. ```yarn dev```
```Note: Please ping us in #contributing channel in our slack community and we will DM you with <test environment URL>```
@@ -41,7 +41,7 @@ This project was bootstrapped with [Create React App](https://github.com/faceboo
In the project directory, you can run:
### `pnpm start`
### `yarn start`
Runs the app in the development mode.\
Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
@@ -49,12 +49,12 @@ Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
The page will reload if you make edits.\
You will also see any lint errors in the console.
### `pnpm test`
### `yarn test`
Launches the test runner in the interactive watch mode.\
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
### `pnpm build`
### `yarn build`
Builds the app for production to the `build` folder.\
It correctly bundles React in production mode and optimizes the build for the best performance.
@@ -64,7 +64,7 @@ Your app is ready to be deployed!
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
### `pnpm eject`
### `yarn eject`
**Note: this is a one-way operation. Once you `eject`, you can't go back!**
@@ -100,6 +100,6 @@ This section has moved here: [https://facebook.github.io/create-react-app/docs/a
This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)
### `pnpm build` fails to minify
### `yarn build` fails to minify
This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)

View File

@@ -1,28 +0,0 @@
import React from 'react';
const IconMock = React.forwardRef<SVGSVGElement, React.SVGProps<SVGSVGElement>>(
(props, ref) => <svg ref={ref} {...props} />,
);
IconMock.displayName = 'IconMock';
// Returns a Proxy that resolves any named export to IconMock by default,
// so `import { AnyIcon } from '@signozhq/icons'` always returns a valid component.
// Pass `overrides` to swap in test-specific stubs (e.g. icons with data-testid).
export function createIconsMock(
overrides: Record<string, unknown> = {},
): Record<string | symbol, unknown> {
return new Proxy(
{ __esModule: true, default: IconMock, ...overrides } as Record<
string | symbol,
unknown
>,
{
get(target, prop: string | symbol): unknown {
if (prop in target) {
return target[prop as string];
}
return IconMock;
},
},
);
}
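A minimal sketch of how an individual spec might use the overrides escape hatch described above. The ChevronDown export name is an assumption; the 'test-mocks/*' alias relies on the moduleNameMapper entry shown in the jest config further down:

// In a test file: replace the default icons mock for this spec only, keeping
// the Proxy fallback for every other icon.
jest.mock('@signozhq/icons', () => {
  const React = require('react');
  // 'test-mocks/*' resolves to '<rootDir>/__mocks__/*' via the jest config's moduleNameMapper.
  const { createIconsMock } = require('test-mocks/createIconsMock');
  return createIconsMock({
    // Assumed icon name, stubbed with a queryable test id.
    ChevronDown: (props: Record<string, unknown>) =>
      React.createElement('svg', { 'data-testid': 'chevron-down-icon', ...props }),
  });
});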

View File

@@ -1,3 +0,0 @@
import { createIconsMock } from './createIconsMock';
module.exports = createIconsMock();

View File

@@ -24,8 +24,8 @@ const config: Config.InitialOptions = {
'^.*/useSafeNavigate$': USE_SAFE_NAVIGATE_MOCK_PATH,
'^constants/env$': '<rootDir>/__mocks__/env.ts',
'^src/constants/env$': '<rootDir>/__mocks__/env.ts',
'^@signozhq/icons$': '<rootDir>/__mocks__/signozhqIconsMock.tsx',
'^test-mocks/(.*)$': '<rootDir>/__mocks__/$1',
'^@signozhq/icons$':
'<rootDir>/node_modules/@signozhq/icons/dist/index.esm.js',
'^react-syntax-highlighter/dist/esm/(.*)$':
'<rootDir>/node_modules/react-syntax-highlighter/dist/cjs/$1',
'^@signozhq/(?!ui(?:/|$))([^/]+)$':
@@ -46,11 +46,7 @@ const config: Config.InitialOptions = {
},
transformIgnorePatterns: [
// @chenglou/pretext is ESM-only; @signozhq/ui pulls it in via text-ellipsis.
// Pattern 1: allow .pnpm virtual store through (handled by pattern 2), plus root-level ESM packages.
'node_modules/(?!(\\.pnpm|lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@chenglou/pretext|@signozhq/design-tokens|@signozhq|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn|@grafana|nuqs|uuid)/)',
// Pattern 2: pnpm virtual store — ignore everything except ESM-only packages.
// pnpm encodes scoped packages as @scope+name@version, so match on scope prefix.
'node_modules/\\.pnpm/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@chenglou|@signozhq|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn|@grafana|nuqs|uuid)[^/]*/node_modules)',
'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@chenglou/pretext|@signozhq/design-tokens|@signozhq/table|@signozhq/calendar|@signozhq/input|@signozhq/popover|@signozhq/*|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn|@grafana|nuqs)/)',
],
setupFilesAfterEnv: ['<rootDir>/jest.setup.ts'],
testPathIgnorePatterns: ['/node_modules/', '/public/'],

View File

@@ -6,7 +6,6 @@
* Adds custom matchers from the react testing library to all tests
*/
import '@testing-library/jest-dom';
import '@testing-library/jest-dom/extend-expect';
import 'jest-styled-components';
import { server } from './src/mocks-server/server';
@@ -29,18 +28,6 @@ if (!HTMLElement.prototype.scrollIntoView) {
HTMLElement.prototype.scrollIntoView = function (): void {};
}
if (typeof window.IntersectionObserver === 'undefined') {
class IntersectionObserverMock {
observe(): void {}
unobserve(): void {}
disconnect(): void {}
takeRecords(): IntersectionObserverEntry[] {
return [];
}
}
(window as any).IntersectionObserver = IntersectionObserverMock;
}
// Patch getComputedStyle to handle CSS parsing errors from @signozhq/* packages.
// These packages inject CSS at import time via style-inject / vite-plugin-css-injected-by-js.
// jsdom's nwsapi cannot parse some of the injected selectors (e.g. Tailwind's :animate-in),

View File

@@ -80,7 +80,7 @@ export default defineConfig({
header: (info: { title: string; version: string }): string[] => [
`! Do not edit manually`,
`* The file has been auto-generated using Orval for SigNoz`,
`* regenerate with 'pnpm generate:api'`,
`* regenerate with 'yarn generate:api'`,
...(info.title ? [info.title] : []),
...(info.version ? [`OpenAPI spec version: ${info.version}`] : []),
],

View File

@@ -4,7 +4,6 @@
"description": "",
"type": "module",
"scripts": {
"preinstall": "npx only-allow pnpm",
"i18n:generate-hash": "node ./i18-generate-hash.cjs",
"dev": "vite",
"build": "vite build",
@@ -19,7 +18,7 @@
"jest": "jest",
"jest:coverage": "jest --coverage",
"jest:watch": "jest --watch",
"postinstall": "pnpm i18n:generate-hash && (is-ci || pnpm husky:configure) && node scripts/update-registry.cjs",
"postinstall": "yarn i18n:generate-hash && (is-ci || yarn husky:configure) && node scripts/update-registry.cjs",
"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
"commitlint": "commitlint --edit $1",
"test": "jest",
@@ -27,17 +26,15 @@
"generate:api": "orval --config ./orval.config.ts && sh scripts/post-types-generation.sh"
},
"engines": {
"node": ">=22.0.0",
"pnpm": ">=10.0.0 <11.0.0"
"node": ">=22.0.0"
},
"author": "",
"license": "ISC",
"dependencies": {
"@ant-design/colors": "6.0.0",
"@ant-design/icons": "4.8.0",
"@codemirror/autocomplete": "6.18.6",
"@codemirror/lang-javascript": "6.2.3",
"@codemirror/state": "6.5.2",
"@codemirror/view": "6.36.6",
"@dnd-kit/core": "6.1.0",
"@dnd-kit/modifiers": "7.0.0",
"@dnd-kit/sortable": "8.0.0",
@@ -51,7 +48,7 @@
"@sentry/react": "8.41.0",
"@sentry/vite-plugin": "2.22.6",
"@signozhq/design-tokens": "2.1.4",
"@signozhq/icons": "0.4.0",
"@signozhq/icons": "0.1.0",
"@signozhq/resizable": "0.0.2",
"@signozhq/ui": "0.0.18",
"@tanstack/react-table": "8.21.3",
@@ -100,6 +97,7 @@
"jest": "30.2.0",
"js-base64": "^3.7.2",
"lodash-es": "^4.17.21",
"lucide-react": "0.498.0",
"mini-css-extract-plugin": "2.4.5",
"motion": "12.4.13",
"nuqs": "2.8.8",
@@ -107,7 +105,6 @@
"overlayscrollbars-react": "^0.5.6",
"papaparse": "5.4.1",
"posthog-js": "1.298.0",
"rc-select": "14.10.0",
"rc-tween-one": "3.0.6",
"react": "18.2.0",
"react-addons-update": "15.6.3",
@@ -137,7 +134,6 @@
"redux": "^4.0.5",
"redux-thunk": "^2.3.0",
"rehype-raw": "7.0.0",
"remark-gfm": "^3.0.1",
"rollup-plugin-visualizer": "7.0.0",
"rrule": "2.8.1",
"stream": "^0.0.2",
@@ -172,21 +168,18 @@
"@babel/preset-env": "^7.22.14",
"@babel/preset-react": "^7.12.13",
"@babel/preset-typescript": "^7.21.4",
"@commitlint/cli": "20.4.4",
"@commitlint/config-conventional": "20.4.4",
"@commitlint/cli": "^20.4.2",
"@commitlint/config-conventional": "^20.4.2",
"@faker-js/faker": "9.3.0",
"@jest/globals": "30.2.0",
"@jest/types": "30.2.0",
"@testing-library/jest-dom": "5.16.5",
"@testing-library/react": "13.4.0",
"@testing-library/user-event": "14.4.3",
"@types/color": "^3.0.3",
"@types/crypto-js": "4.2.2",
"@types/d3-hierarchy": "1.1.11",
"@types/dompurify": "^2.4.0",
"@types/event-source-polyfill": "^1.0.0",
"@types/fontfaceobserver": "2.1.0",
"@types/history": "4.7.11",
"@types/jest": "30.0.0",
"@types/lodash-es": "^4.17.4",
"@types/mini-css-extract-plugin": "^2.5.1",
@@ -205,13 +198,11 @@
"@types/react-syntax-highlighter": "15.5.13",
"@types/redux-mock-store": "1.0.4",
"@types/styled-components": "^5.1.4",
"@types/testing-library__jest-dom": "^5.14.5",
"@types/uuid": "^8.3.1",
"@typescript/native-preview": "7.0.0-dev.20260430.1",
"@typescript/native-preview": "7.0.0-dev.20260421.2",
"autoprefixer": "10.4.19",
"babel-plugin-styled-components": "^1.12.0",
"eslint-plugin-sonarjs": "4.0.2",
"glob": "^13.0.6",
"husky": "^7.0.4",
"imagemin": "^8.0.1",
"imagemin-svgo": "^10.0.1",
@@ -222,7 +213,7 @@
"lint-staged": "^12.5.0",
"msw": "1.3.2",
"npm-run-all": "latest",
"orval": "7.21.0",
"orval": "7.18.0",
"oxfmt": "0.47.0",
"oxlint": "1.62.0",
"oxlint-tsgolint": "0.22.1",
@@ -239,7 +230,7 @@
"stylelint-scss": "7.0.0",
"svgo": "4.0.0",
"ts-api-utils": "2.4.0",
"ts-jest": "29.4.9",
"ts-jest": "29.4.6",
"ts-node": "^10.2.1",
"typescript-plugin-css-modules": "5.2.0",
"use-sync-external-store": "1.6.0",

View File

@@ -1,210 +0,0 @@
/**
* Rule: no-signozhq-ui-barrel
*
* Forbids importing from the `@signozhq/ui` barrel and requires the matching
* subpath instead.
*
* This rule catches:
* import { Typography } from '@signozhq/ui'
* import { Button, toast } from '@signozhq/ui'
* import '@signozhq/ui'
*
* And expects:
* import { Typography } from '@signozhq/ui/typography'
* import { Button } from '@signozhq/ui/button'
* import { toast } from '@signozhq/ui/sonner'
*
* Why: the barrel eagerly require()s every component (~90 of them) along with
* their Radix/cmdk/motion/react-day-picker dependencies. Under Jest this caused
* 5s timeouts and flaky tests after the Antd→@signozhq/ui Typography migration
* (#11199). Subpath imports (added in @signozhq/ui@0.0.18) load only what's
* used.
*
* The auto-generated `auto-import-registry.d.ts` is a pure declaration file
* that exists solely to nudge VS Code's auto-import indexer; its bare
* `import '@signozhq/ui';` is type-only and not emitted, so it is exempt.
*
* Autofix:
* Rewrites named imports to the matching subpath, splitting one statement
* into multiple when specifiers come from different subpaths. The
* export-name → subpath map is derived lazily from the installed
* `@signozhq/ui` dist `.d.ts` files. Imports we can't classify (namespace,
* default, side-effect, or unknown specifier) are reported without a fix.
*/
import { existsSync, readFileSync } from 'node:fs';
import { dirname, join } from 'node:path';
import { fileURLToPath } from 'node:url';
const ALLOWED_FILES = new Set(['auto-import-registry.d.ts']);
const PLUGIN_DIR = dirname(fileURLToPath(import.meta.url));
let exportMap = null;
function loadExportMap() {
if (exportMap === null) {
exportMap = buildExportMap();
}
return exportMap;
}
function buildExportMap() {
const map = new Map();
const root = findSignozUiRoot();
if (!root) return map;
let pkg;
try {
pkg = JSON.parse(readFileSync(join(root, 'package.json'), 'utf-8'));
} catch {
return map;
}
const subpathKeys = Object.keys(pkg.exports || {}).filter((k) => k !== '.');
for (const key of subpathKeys) {
const subpath = key.replace(/^\.\//, '');
const entry = join(root, 'dist', subpath, 'index.d.ts');
if (!existsSync(entry)) continue;
const names = new Set();
collectExportedNames(entry, names, new Set());
// First-wins: package.json subpath order is the canonical home for
// names re-exported across multiple subpaths (e.g. `ToggleColor` is
// declared in `toggle` and re-exported from `toggle-group`).
for (const name of names) {
if (!map.has(name)) map.set(name, subpath);
}
}
return map;
}
function findSignozUiRoot() {
let dir = PLUGIN_DIR;
while (true) {
const candidate = join(dir, 'node_modules', '@signozhq', 'ui');
if (existsSync(join(candidate, 'package.json'))) return candidate;
const parent = dirname(dir);
if (parent === dir) return null;
dir = parent;
}
}
function collectExportedNames(filepath, out, visited) {
if (visited.has(filepath) || !existsSync(filepath)) return;
visited.add(filepath);
let content;
try {
content = readFileSync(filepath, 'utf-8');
} catch {
return;
}
// `export * from './x.js'` / `export type * from './x.js'`
for (const m of content.matchAll(
/export\s+(?:type\s+)?\*\s+from\s+['"]([^'"]+)['"]/g,
)) {
collectExportedNames(resolveRelativeDts(filepath, m[1]), out, visited);
}
// `export { Foo, type Bar, Foo as Baz } from '...';` and `export { ... };`
for (const m of content.matchAll(/export\s+(?:type\s+)?\{([^}]*)\}/g)) {
for (const item of m[1].split(',')) {
const cleaned = item.trim().replace(/^type\s+/, '');
if (!cleaned) continue;
const idMatch = cleaned.match(
/^([A-Za-z_$][A-Za-z0-9_$]*)(?:\s+as\s+([A-Za-z_$][A-Za-z0-9_$]*))?$/,
);
if (idMatch) out.add(idMatch[2] || idMatch[1]);
}
}
// `export (declare) const|let|var|function|class|enum|type|interface Foo`
for (const m of content.matchAll(
/export\s+(?:declare\s+)?(?:const|let|var|function|class|enum|type|interface)\s+([A-Za-z_$][A-Za-z0-9_$]*)/g,
)) {
out.add(m[1]);
}
}
function resolveRelativeDts(fromFile, spec) {
const base = dirname(fromFile);
const stripped = spec.replace(/\.(js|mjs|cjs)$/, '');
const sibling = join(base, `${stripped}.d.ts`);
if (existsSync(sibling)) return sibling;
const indexed = join(base, stripped, 'index.d.ts');
if (existsSync(indexed)) return indexed;
return sibling;
}
function buildReplacement(node, map) {
const specifiers = node.specifiers || [];
if (specifiers.length === 0) return null;
for (const spec of specifiers) {
if (spec.type !== 'ImportSpecifier') return null;
if (spec.imported?.type !== 'Identifier') return null;
}
const quote = node.source.raw?.[0] === '"' ? '"' : "'";
const topLevelType = node.importKind === 'type';
const keyword = topLevelType ? 'import type' : 'import';
const groups = new Map();
for (const spec of specifiers) {
const importedName = spec.imported.name;
const subpath = map.get(importedName);
if (!subpath) return null;
const localName = spec.local.name;
const inlineType = !topLevelType && spec.importKind === 'type';
let text = inlineType ? 'type ' : '';
text += importedName;
if (localName !== importedName) text += ` as ${localName}`;
if (!groups.has(subpath)) groups.set(subpath, []);
groups.get(subpath).push(text);
}
const lines = [];
for (const [subpath, items] of groups) {
lines.push(
`${keyword} { ${items.join(', ')} } from ${quote}@signozhq/ui/${subpath}${quote};`,
);
}
return lines.join('\n');
}
export default {
meta: {
fixable: 'code',
},
create(context) {
const filename = context.filename || '';
const basename = filename.split(/[\\/]/).pop();
if (ALLOWED_FILES.has(basename)) {
return {};
}
return {
ImportDeclaration(node) {
if (node.source.value !== '@signozhq/ui') {
return;
}
const replacement = buildReplacement(node, loadExportMap());
const report = {
node: node.source,
message:
"Do not import from the '@signozhq/ui' barrel. Use the matching subpath instead (e.g. '@signozhq/ui/typography', '@signozhq/ui/button', '@signozhq/ui/sonner'). The barrel eagerly loads ~90 components and slows tests substantially.",
};
if (replacement) {
report.fix = (fixer) => fixer.replaceText(node, replacement);
}
context.report(report);
},
};
},
};
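For a concrete sense of the autofix, a hedged before/after sketch. The button/typography/sonner subpaths come from the comment at the top of the rule; the aliased specifier and the assumption that TypographyProps is exported from the typography subpath are illustrative:

// Before (reported by signoz/no-signozhq-ui-barrel):
import { Button as CTA, type TypographyProps, toast } from '@signozhq/ui';

// After autofix (one statement per subpath; `as` aliases and inline `type` are preserved):
import { Button as CTA } from '@signozhq/ui/button';
import { type TypographyProps } from '@signozhq/ui/typography';
import { toast } from '@signozhq/ui/sonner';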

View File

@@ -10,7 +10,6 @@ import noNavigatorClipboard from './rules/no-navigator-clipboard.mjs';
import noUnsupportedAssetPattern from './rules/no-unsupported-asset-pattern.mjs';
import noRawAbsolutePath from './rules/no-raw-absolute-path.mjs';
import noAntdComponents from './rules/no-antd-components.mjs';
import noSignozhqUiBarrel from './rules/no-signozhq-ui-barrel.mjs';
export default {
meta: {
@@ -22,6 +21,5 @@ export default {
'no-unsupported-asset-pattern': noUnsupportedAssetPattern,
'no-raw-absolute-path': noRawAbsolutePath,
'no-antd-components': noAntdComponents,
'no-signozhq-ui-barrel': noSignozhqUiBarrel,
},
};

frontend/pnpm-lock.yaml (generated, 22368 changed lines)

File diff suppressed because it is too large.

View File

@@ -26,6 +26,5 @@
"dashboard_unsave_changes": "There are unsaved changes in the Query builder, please stage and run the query or the changes will be lost. Press OK to discard.",
"dashboard_save_changes": "Your graph built with {{queryTag}} query will be saved. Press OK to confirm.",
"your_graph_build_with": "Your graph built with",
"dashboard_ok_confirm": "query will be saved. Press OK to confirm.",
"variable_name_already_exists": "Variable \"{{name}}\" already exists"
"dashboard_ok_confirm": "query will be saved. Press OK to confirm."
}

View File

@@ -30,6 +30,5 @@
"dashboard_unsave_changes": "There are unsaved changes in the Query builder, please stage and run the query or the changes will be lost. Press OK to discard.",
"dashboard_save_changes": "Your graph built with {{queryTag}} query will be saved. Press OK to confirm.",
"your_graph_build_with": "Your graph built with",
"dashboard_ok_confirm": "query will be saved. Press OK to confirm.",
"variable_name_already_exists": "Variable \"{{name}}\" already exists"
"dashboard_ok_confirm": "query will be saved. Press OK to confirm."
}

View File

@@ -16,7 +16,7 @@ echo "\n✅ Tag files renamed to index.ts"
# Format generated files
echo "\n\n---\nRunning prettier...\n"
if ! pnpm prettify src/api/generated; then
if ! yarn prettify src/api/generated; then
echo "Formatting failed!"
exit 1
fi
@@ -25,7 +25,7 @@ echo "\n✅ Formatting successful"
# Fix linting issues
echo "\n\n---\nRunning lint...\n"
if ! pnpm lint:generated; then
if ! yarn lint:generated; then
echo "Lint check failed! Please fix linting errors before proceeding."
exit 1
fi

View File

@@ -17,12 +17,6 @@ registered_languages=$(grep -oP "registerLanguage\('\K[^']+" "$SYNTAX_HIGHLIGHTE
missing_languages=()
for lang in $md_languages; do
# Skip ai-* block markers — these are custom AI block types rendered by
# RichCodeBlock as React components (e.g. ActionBlock, LineChartBlock),
# not real syntax languages, so they don't need highlighter registration.
if [[ "$lang" == ai-* ]]; then
continue
fi
if ! echo "$registered_languages" | grep -qx "$lang"; then
missing_languages+=("$lang")
fi

View File

@@ -8,7 +8,6 @@ import { LOCALSTORAGE } from 'constants/localStorage';
import { ORG_PREFERENCES } from 'constants/orgPreferences';
import ROUTES from 'constants/routes';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import { useIsAIAssistantEnabled } from 'hooks/useIsAIAssistantEnabled';
import { isEmpty } from 'lodash-es';
import { useAppContext } from 'providers/App/App';
import { LicensePlatform, LicenseState } from 'types/api/licensesV3/getActive';
@@ -41,8 +40,6 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
} = useAppContext();
const isAdmin = user.role === USER_ROLES.ADMIN;
const isAIAssistantEnabled = useIsAIAssistantEnabled();
const mapRoutes = useMemo(
() =>
new Map(
@@ -102,10 +99,6 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
return <>{children}</>;
}
if (pathname.startsWith('/ai-assistant/') && !isAIAssistantEnabled) {
return <Redirect to={ROUTES.HOME} />;
}
// Check for workspace access restriction (cloud only)
const isCloudPlatform = activeLicense?.platform === LicensePlatform.CLOUD;

View File

@@ -164,17 +164,14 @@ function createMockAppContext(
featureFlags: [],
orgPreferences: createMockOrgPreferences(),
userPreferences: [],
hostsData: null,
isLoggedIn: true,
org: [{ createdAt: 0, id: 'org-id', displayName: 'Test Org' }],
isFetchingUser: false,
isFetchingActiveLicense: false,
isFetchingHosts: false,
isFetchingFeatureFlags: false,
isFetchingOrgPreferences: false,
userFetchError: null,
activeLicenseFetchError: null,
hostsFetchError: null,
featureFlagsFetchError: null,
orgPreferencesFetchError: null,
changelog: null,

View File

@@ -18,7 +18,6 @@ import AppLayout from 'container/AppLayout';
import Hex from 'crypto-js/enc-hex';
import HmacSHA256 from 'crypto-js/hmac-sha256';
import { KeyboardHotkeysProvider } from 'hooks/hotkeys/useKeyboardHotkeys';
import { useIsAIAssistantEnabled } from 'hooks/useIsAIAssistantEnabled';
import { useIsDarkMode, useThemeConfig } from 'hooks/useDarkMode';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import { NotificationProvider } from 'hooks/useNotifications';
@@ -61,21 +60,13 @@ function App(): JSX.Element {
org,
} = useAppContext();
const [routes, setRoutes] = useState<AppRoutes[]>(defaultRoutes);
const isAIAssistantEnabled = useIsAIAssistantEnabled();
const { hostname } = window.location;
const [pathname, setPathname] = useState(history.location.pathname);
const { hostname, pathname } = window.location;
const { isCloudUser, isEnterpriseSelfHostedUser } = useGetTenantLicense();
const [isSentryInitialized, setIsSentryInitialized] = useState(false);
useEffect(() => {
return history.listen((location) => {
setPathname(location.pathname);
});
}, []);
const enableAnalytics = useCallback(
(user: IUser): void => {
// wait for the required data to be loaded before doing init for anything!
@@ -221,27 +212,6 @@ function App(): JSX.Element {
activeLicenseFetchError,
]);
useEffect(() => {
if (!isLoggedInState) {
return;
}
setRoutes((prev) => {
const hasAi = prev.some((r) => r.path === ROUTES.AI_ASSISTANT);
if (isAIAssistantEnabled === hasAi) {
return prev;
}
if (isAIAssistantEnabled) {
const aiRoute = defaultRoutes.find((r) => r.path === ROUTES.AI_ASSISTANT);
if (!aiRoute) {
return prev;
}
return [...prev.filter((r) => r.path !== ROUTES.AI_ASSISTANT), aiRoute];
}
return prev.filter((r) => r.path !== ROUTES.AI_ASSISTANT);
});
}, [isLoggedInState, isAIAssistantEnabled]);
const isDarkMode = useIsDarkMode();
useEffect(() => {
@@ -251,8 +221,7 @@ function App(): JSX.Element {
useEffect(() => {
if (
pathname === ROUTES.ONBOARDING ||
pathname.startsWith('/public/dashboard/') ||
pathname.startsWith('/ai-assistant/')
pathname.startsWith('/public/dashboard/')
) {
window.Pylon?.('hideChatBubble');
} else {

View File

@@ -324,10 +324,3 @@ export const MeterExplorerPage = Loadable(
() =>
import(/* webpackChunkName: "Meter Explorer Page" */ 'pages/MeterExplorer'),
);
export const AIAssistantPage = Loadable(
() =>
import(
/* webpackChunkName: "AI Assistant Page" */ 'pages/AIAssistantPage/AIAssistantPage'
),
);

View File

@@ -2,7 +2,6 @@ import { RouteProps } from 'react-router-dom';
import ROUTES from 'constants/routes';
import {
AIAssistantPage,
AlertHistory,
AlertOverview,
AlertTypeSelectionPage,
@@ -508,13 +507,6 @@ const routes: AppRoutes[] = [
key: 'API_MONITORING',
isPrivate: true,
},
{
path: ROUTES.AI_ASSISTANT,
exact: true,
component: AIAssistantPage,
key: 'AI_ASSISTANT',
isPrivate: true,
},
];
export const SUPPORT_ROUTE: AppRoutes = {

View File

@@ -1,80 +0,0 @@
import axios, { InternalAxiosRequestConfig } from 'axios';
import {
interceptorRejected,
interceptorsRequestBasePath,
interceptorsRequestResponse,
interceptorsResponse,
} from 'api';
import { getSigNozInstanceUrl } from 'utils/signozInstanceUrl';
/** Path-only base for the AI Assistant API. */
export const AI_API_PATH = '/api/v1/assistant';
/** Header that tells the AI backend which SigNoz instance to query against. */
export const SIGNOZ_URL_HEADER = 'X-SigNoz-URL';
/**
* Sets `X-SigNoz-URL` on every outgoing AI Assistant request. The backend
* needs the originating SigNoz instance URL for multi-tenant deployments;
* when omitted it falls back to its `SIGNOZ_API_URL` env var.
*/
export const interceptorsRequestSigNozUrl = (
value: InternalAxiosRequestConfig,
): InternalAxiosRequestConfig => {
if (value.headers) {
value.headers[SIGNOZ_URL_HEADER] = getSigNozInstanceUrl();
}
return value;
};
/**
* AI backend URL — sourced from the global config's `ai_assistant_url` field
* at runtime. `useIsAIAssistantEnabled` keeps this in sync via `setAIBackendUrl`
* whenever the config response changes; consumers (the axios instance and the
* SSE fetch path) read it lazily so they always see the current value.
*/
let aiBackendUrl: string | null = null;
export function setAIBackendUrl(url: string | null): void {
if (aiBackendUrl === url) {
return;
}
aiBackendUrl = url;
AIAssistantInstance.defaults.baseURL = url ? `${url}${AI_API_PATH}` : '';
}
/**
* Full base URL for the AI Assistant API (host + path). Throws when the
* config hasn't yet provided a URL — should never happen in practice
* because `useIsAIAssistantEnabled` gates every consumer surface.
*/
export function getAIBaseUrl(): string {
if (!aiBackendUrl) {
throw new Error('AI assistant URL is not configured.');
}
return `${aiBackendUrl}${AI_API_PATH}`;
}
/**
* Dedicated axios instance for the AI Assistant.
*
* Mirrors the request/response interceptor stack of the main SigNoz axios
* instance — most importantly `interceptorRejected`, which transparently
* rotates the access token via `/sessions/rotate` on a 401 and replays the
* original request. That's why we don't need any AI-specific 401 handling
* for REST calls: this instance inherits the same flow as the rest of the
* app for free.
*
* Only the SSE stream (`streamEvents`) still needs raw fetch since axios
* doesn't expose `ReadableStream` — that path keeps its own auth wrapper.
*/
export const AIAssistantInstance = axios.create({});
AIAssistantInstance.interceptors.request.use(interceptorsRequestResponse);
AIAssistantInstance.interceptors.request.use(interceptorsRequestBasePath);
AIAssistantInstance.interceptors.request.use(interceptorsRequestSigNozUrl);
AIAssistantInstance.interceptors.response.use(
interceptorsResponse,
interceptorRejected,
);
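A minimal consumer sketch of the wiring described above: push the backend URL from the global config, then issue a relative request through the shared instance. The import path and the fetchAssistantThreads helper are assumptions; the '/threads' endpoint matches the client elsewhere in this diff.

import { AIAssistantInstance, setAIBackendUrl } from 'api/AIAPIInstance'; // path assumed

async function fetchAssistantThreads(aiAssistantUrl: string | null): Promise<unknown> {
  // Keeps baseURL in sync: `${url}/api/v1/assistant`, or '' when the config has no URL.
  setAIBackendUrl(aiAssistantUrl);
  // The relative path resolves against the instance baseURL; interceptorsRequestSigNozUrl
  // attaches X-SigNoz-URL, and interceptorRejected handles the 401-then-rotate flow.
  const response = await AIAssistantInstance.get('/threads');
  return response.data;
}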

View File

@@ -1,543 +0,0 @@
/**
* AI Assistant API client.
*
* Flow:
* 1. POST /api/v1/assistant/threads → { threadId }
* 2. POST /api/v1/assistant/threads/{threadId}/messages → { executionId }
* 3. GET /api/v1/assistant/executions/{executionId}/events → SSE stream (closes on 'done')
*
* For subsequent messages in the same thread, repeat steps 2–3.
* Approval/clarification events pause the stream; use approveExecution/clarifyExecution
* to resume, which each return a new executionId to open a fresh SSE stream.
*
* Types in this file re-use the OpenAPI-generated DTOs in
* `src/api/ai-assistant/sigNozAIAssistantAPI.schemas.ts`.
* Local types are defined only when the UI needs a different shape — for
* example, the SSE event union adds a literal `type` discriminator that the
* generated event DTOs leave loose.
*
* REST calls go through `AIAssistantInstance` (an axios instance configured
* with the same interceptor stack as the rest of the app) — that gives them
* automatic 401-then-rotate behaviour for free. Only the SSE call is still
* a raw `fetch` because axios doesn't expose `ReadableStream`; that one
* path gets its own small auth wrapper.
*/
import axios from 'axios';
import getLocalStorageApi from 'api/browser/localstorage/get';
import { Logout } from 'api/utils';
import rotateSession from 'api/v2/sessions/rotate/post';
import afterLogin from 'AppRoutes/utils';
import type {
ActionResultResponseDTO,
ApprovalEventDTO,
ApproveResponseDTO,
CancelResponseDTO,
ClarificationEventDTO,
ClarifyResponseDTO,
ConversationEventDTO,
CreateMessageResponseDTO,
CreateThreadResponseDTO,
DoneEventDTO,
ErrorEventDTO,
ExecutionStateDTO,
FeedbackRatingDTO,
ListThreadsApiV1AssistantThreadsGetArchived,
ListThreadsApiV1AssistantThreadsGetParams,
MessageContextDTO,
MessageContextDTOSource,
MessageContextDTOType,
MessageEventDTO,
MessageSummaryDTO,
RegenerateResponseDTO,
StatusEventDTO,
ThinkingEventDTO,
ThreadDetailResponseDTO,
ThreadListResponseDTO,
ThreadSummaryDTO,
ToolCallEventDTO,
ToolResultEventDTO,
} from './sigNozAIAssistantAPI.schemas';
import { LOCALSTORAGE } from 'constants/localStorage';
import {
AIAssistantInstance,
getAIBaseUrl,
SIGNOZ_URL_HEADER,
} from '../AIAPIInstance';
import { getSigNozInstanceUrl } from 'utils/signozInstanceUrl';
// ---------------------------------------------------------------------------
// SSE-only auth wrapper.
//
// REST calls go through `AIAssistantInstance` (axios) and get refresh-token
// behaviour from the shared `interceptorRejected`. The SSE call has to use
// raw `fetch` (axios can't stream a `ReadableStream`), so it can't ride that
// interceptor — this small wrapper handles 401 at SSE open time by hitting
// the same rotate endpoint and replaying the request once.
//
// In typical use a REST call (e.g. sendMessage / loadThread) precedes every
// stream open, so axios will already have refreshed the token and `fetch`
// just reads the fresh one from localStorage. The wrapper exists for the
// edge case where SSE is the first call to encounter a 401.
// ---------------------------------------------------------------------------
let pendingRotate: Promise<string | null> | null = null;
async function rotateAccessToken(): Promise<string | null> {
if (pendingRotate) {
return pendingRotate;
}
const refreshToken = getLocalStorageApi(LOCALSTORAGE.REFRESH_AUTH_TOKEN) || '';
if (!refreshToken) {
return null;
}
pendingRotate = (async (): Promise<string | null> => {
try {
const response = await rotateSession({ refreshToken });
afterLogin(response.data.accessToken, response.data.refreshToken, true);
return response.data.accessToken;
} catch {
Logout();
return null;
} finally {
pendingRotate = null;
}
})();
return pendingRotate;
}
// Backoff schedule for 429 retries on SSE open. Three attempts is enough to
// absorb the brief window between cancel→send→stream when the backend is
// rate-limiting the burst, without making real "you're saturated" errors
// take forever to surface.
const SSE_429_BACKOFF_MS = [400, 1200, 2500];
function parseRetryAfterMs(value: string | null): number | null {
if (!value) {
return null;
}
const seconds = Number(value);
if (Number.isFinite(seconds)) {
return Math.max(0, seconds * 1000);
}
const date = Date.parse(value);
if (Number.isFinite(date)) {
return Math.max(0, date - Date.now());
}
return null;
}
async function fetchSSEWithAuth(
url: string,
signal?: AbortSignal,
): Promise<Response> {
const send = async (token: string | null): Promise<Response> => {
const headers: Record<string, string> = {
[SIGNOZ_URL_HEADER]: getSigNozInstanceUrl(),
};
if (token) {
headers.Authorization = `Bearer ${token}`;
}
return fetch(url, { headers, signal });
};
const sendWithAuth = async (): Promise<Response> => {
const initialToken = getLocalStorageApi(LOCALSTORAGE.AUTH_TOKEN) || '';
const res = await send(initialToken);
if (res.status !== 401) {
return res;
}
const refreshed = await rotateAccessToken();
if (!refreshed) {
return res;
}
return send(refreshed);
};
let res = await sendWithAuth();
for (const baseDelay of SSE_429_BACKOFF_MS) {
if (res.status !== 429 || signal?.aborted) {
return res;
}
const retryAfter = parseRetryAfterMs(res.headers.get('Retry-After'));
const delay = retryAfter ?? baseDelay;
// eslint-disable-next-line no-await-in-loop
await new Promise<void>((resolve, reject) => {
const timer = setTimeout(resolve, delay);
signal?.addEventListener(
'abort',
() => {
clearTimeout(timer);
reject(new DOMException('SSE 429 backoff aborted', 'AbortError'));
},
{ once: true },
);
});
// eslint-disable-next-line no-await-in-loop
res = await sendWithAuth();
}
return res;
}
// ---------------------------------------------------------------------------
// SSE event types
//
// The generated event DTOs each declare `type?: string` (loose). The UI needs
// a discriminated union, so we intersect each variant with a string-literal
// `type` to enable narrowing via `event.type === 'status'`.
// ---------------------------------------------------------------------------
export type SSEEvent =
| (StatusEventDTO & { type: 'status' })
| (MessageEventDTO & { type: 'message' })
| (ThinkingEventDTO & { type: 'thinking' })
| (ToolCallEventDTO & { type: 'tool_call' })
| (ToolResultEventDTO & { type: 'tool_result' })
| (ApprovalEventDTO & { type: 'approval' })
| (ClarificationEventDTO & { type: 'clarification' })
| (ErrorEventDTO & { type: 'error' })
| (ConversationEventDTO & { type: 'conversation' })
| (DoneEventDTO & { type: 'done' });
/** String-literal view of `ExecutionStateDTO` for ergonomic comparisons. */
export type ExecutionState = `${ExecutionStateDTO}`;
// ---------------------------------------------------------------------------
// Re-exported DTOs — the wire shape, used directly without remapping.
// ---------------------------------------------------------------------------
export type ThreadSummary = ThreadSummaryDTO;
export type ThreadListResponse = ThreadListResponseDTO;
export type ThreadDetailResponse = ThreadDetailResponseDTO;
export type MessageSummary = MessageSummaryDTO;
export type CancelResponse = CancelResponseDTO;
/**
* Construction-friendly view of `MessageContextDTO`: enum fields are widened
* to their string-literal unions so call-sites can pass `'mention'` instead
* of `MessageContextDTOSource.mention`.
*/
export type MessageContext = Omit<MessageContextDTO, 'source' | 'type'> & {
source: `${MessageContextDTOSource}`;
type: `${MessageContextDTOType}`;
};
/** Construction-friendly view of `ListThreadsApiV1AssistantThreadsGetParams`. */
export type ListThreadsOptions = Omit<
ListThreadsApiV1AssistantThreadsGetParams,
'archived'
> & {
archived?: `${ListThreadsApiV1AssistantThreadsGetArchived}`;
};
/** String-literal view of `FeedbackRatingDTO` so call-sites can pass `'positive'`/`'negative'`. */
export type FeedbackRating = `${FeedbackRatingDTO}`;
// ---------------------------------------------------------------------------
// Thread listing & detail
// ---------------------------------------------------------------------------
export async function listThreads(
options: ListThreadsOptions = {},
): Promise<ThreadListResponse> {
const {
archived = 'false',
limit = 20,
cursor = null,
sort = 'updated_desc',
} = options;
const response = await AIAssistantInstance.get<ThreadListResponse>(
'/threads',
{
params: {
archived,
limit,
sort,
...(cursor ? { cursor } : {}),
},
},
);
return response.data;
}
export async function updateThread(
threadId: string,
update: { title?: string | null; archived?: boolean | null },
): Promise<ThreadSummary> {
const response = await AIAssistantInstance.patch<ThreadSummary>(
`/threads/${threadId}`,
update,
);
return response.data;
}
export async function getThreadDetail(
threadId: string,
): Promise<ThreadDetailResponse> {
const response = await AIAssistantInstance.get<ThreadDetailResponse>(
`/threads/${threadId}`,
);
return response.data;
}
// ---------------------------------------------------------------------------
// Step 1 — Create thread
// POST /api/v1/assistant/threads → { threadId }
// ---------------------------------------------------------------------------
export async function createThread(signal?: AbortSignal): Promise<string> {
const response = await AIAssistantInstance.post<CreateThreadResponseDTO>(
'/threads',
{},
{ signal },
);
return response.data.threadId;
}
// ---------------------------------------------------------------------------
// Step 2 — Send message
// POST /api/v1/assistant/threads/{threadId}/messages → { executionId }
// ---------------------------------------------------------------------------
/** Fetches the thread's active executionId for reconnect on thread_busy (409). */
async function getActiveExecutionId(threadId: string): Promise<string | null> {
try {
const response = await AIAssistantInstance.get<ThreadDetailResponseDTO>(
`/threads/${threadId}`,
);
return response.data.activeExecutionId ?? null;
} catch {
return null;
}
}
export async function sendMessage(
threadId: string,
content: string,
contexts?: MessageContext[],
signal?: AbortSignal,
): Promise<string> {
try {
const response = await AIAssistantInstance.post<CreateMessageResponseDTO>(
`/threads/${threadId}/messages`,
{
content,
...(contexts && contexts.length > 0 ? { contexts } : {}),
},
{ signal },
);
return response.data.executionId;
} catch (err) {
// Thread already has an active execution — reconnect to it instead of
// failing the user's send.
if (axios.isAxiosError(err) && err.response?.status === 409) {
const executionId = await getActiveExecutionId(threadId);
if (executionId) {
return executionId;
}
}
throw err;
}
}
// ---------------------------------------------------------------------------
// Step 3 — Stream execution events
// GET /api/v1/assistant/executions/{executionId}/events → SSE
// ---------------------------------------------------------------------------
function parseSSELine(line: string): SSEEvent | null {
if (!line.startsWith('data: ')) {
return null;
}
const json = line.slice('data: '.length).trim();
if (!json || json === '[DONE]') {
return null;
}
try {
return JSON.parse(json) as SSEEvent;
} catch {
return null;
}
}
function parseSSEChunk(chunk: string): SSEEvent[] {
return chunk
.split('\n\n')
.map((part) => part.split('\n').find((l) => l.startsWith('data: ')) ?? '')
.map(parseSSELine)
.filter((e): e is SSEEvent => e !== null);
}
async function* readSSEReader(
reader: ReadableStreamDefaultReader<Uint8Array>,
): AsyncGenerator<SSEEvent> {
const decoder = new TextDecoder();
let lineBuffer = '';
try {
// eslint-disable-next-line no-constant-condition
while (true) {
// eslint-disable-next-line no-await-in-loop
const { done, value } = await reader.read();
if (done) {
break;
}
lineBuffer += decoder.decode(value, { stream: true });
const parts = lineBuffer.split('\n\n');
lineBuffer = parts.pop() ?? '';
yield* parts.flatMap(parseSSEChunk);
}
yield* parseSSEChunk(lineBuffer);
} finally {
reader.releaseLock();
}
}
/**
* Thrown by `streamEvents` when the SSE open returns a non-2xx response.
* Carries the HTTP status so callers can branch on rate-limit vs. other
* failures (e.g. show a "please wait a moment" message on 429).
*/
export class SSEStreamError extends Error {
status: number;
constructor(status: number, statusText: string) {
super(`SSE stream failed: ${status} ${statusText}`);
this.name = 'SSEStreamError';
this.status = status;
}
}
export async function* streamEvents(
executionId: string,
signal?: AbortSignal,
): AsyncGenerator<SSEEvent> {
const res = await fetchSSEWithAuth(
`${getAIBaseUrl()}/executions/${executionId}/events`,
signal,
);
if (!res.ok || !res.body) {
throw new SSEStreamError(res.status, res.statusText);
}
yield* readSSEReader(res.body.getReader());
}
// ---------------------------------------------------------------------------
// Approval / Clarification / Cancel actions
// ---------------------------------------------------------------------------
/** Approve a pending action. Returns a new executionId — open a fresh SSE stream for it. */
export async function approveExecution(
approvalId: string,
signal?: AbortSignal,
): Promise<string> {
const response = await AIAssistantInstance.post<ApproveResponseDTO>(
'/approve',
{ approvalId },
{ signal },
);
return response.data.executionId;
}
/** Reject a pending action. */
export async function rejectExecution(
approvalId: string,
signal?: AbortSignal,
): Promise<void> {
await AIAssistantInstance.post('/reject', { approvalId }, { signal });
}
/** Submit clarification answers. Returns a new executionId — open a fresh SSE stream for it. */
export async function clarifyExecution(
clarificationId: string,
answers: Record<string, unknown>,
signal?: AbortSignal,
): Promise<string> {
const response = await AIAssistantInstance.post<ClarifyResponseDTO>(
'/clarify',
{ clarificationId, answers },
{ signal },
);
return response.data.executionId;
}
/**
* Clean-slate regeneration of an assistant response. The backend rewinds the
* conversation up to (excluding) the supplied messageId and starts a fresh
* execution. Returns the new executionId — open an SSE stream for it the
* same way `sendMessage` and `approve` do.
*/
export async function regenerateMessage(
messageId: string,
signal?: AbortSignal,
): Promise<string> {
const response = await AIAssistantInstance.post<RegenerateResponseDTO>(
`/messages/${messageId}/regenerate`,
undefined,
{ signal },
);
return response.data.executionId;
}
export async function cancelExecution(
threadId: string,
signal?: AbortSignal,
): Promise<CancelResponse> {
const response = await AIAssistantInstance.post<CancelResponse>(
'/cancel',
{ threadId },
{ signal },
);
return response.data;
}
// ---------------------------------------------------------------------------
// Rollback actions — undo / revert / restore
// All three POST `{ actionMetadataId }` and return `ActionResultResponseDTO`.
// ---------------------------------------------------------------------------
async function postRollback(
endpoint: 'undo' | 'revert' | 'restore',
actionMetadataId: string,
signal?: AbortSignal,
): Promise<ActionResultResponseDTO> {
const response = await AIAssistantInstance.post<ActionResultResponseDTO>(
`/${endpoint}`,
{ actionMetadataId },
{ signal },
);
return response.data;
}
export const undoExecution = (
actionMetadataId: string,
signal?: AbortSignal,
): Promise<ActionResultResponseDTO> =>
postRollback('undo', actionMetadataId, signal);
export const revertExecution = (
actionMetadataId: string,
signal?: AbortSignal,
): Promise<ActionResultResponseDTO> =>
postRollback('revert', actionMetadataId, signal);
export const restoreExecution = (
actionMetadataId: string,
signal?: AbortSignal,
): Promise<ActionResultResponseDTO> =>
postRollback('restore', actionMetadataId, signal);
// ---------------------------------------------------------------------------
// Feedback
// ---------------------------------------------------------------------------
export async function submitFeedback(
messageId: string,
rating: FeedbackRating,
comment?: string,
): Promise<void> {
await AIAssistantInstance.post(`/messages/${messageId}/feedback`, {
rating,
comment: comment ?? null,
});
}
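A minimal consumer sketch of the thread → message → SSE flow described at the top of this client, including the approval resume path. The import path, the renderEvent callback, and the approvalId field access are assumptions; the exported functions and the event type discriminator come from the file above.

import {
  approveExecution,
  createThread,
  sendMessage,
  streamEvents,
  type SSEEvent,
} from 'api/aiAssistant'; // path assumed

async function runConversation(
  prompt: string,
  renderEvent: (event: SSEEvent) => void, // hypothetical UI callback
): Promise<void> {
  const threadId = await createThread(); // step 1: create thread
  const executionId = await sendMessage(threadId, prompt); // step 2: send message
  // Step 3: stream until the 'done' event closes the generator.
  for await (const event of streamEvents(executionId)) {
    renderEvent(event);
    if (event.type === 'approval') {
      // Field name assumed from the generated ApprovalEventDTO; approving hands
      // back a new executionId, which a real consumer would stream the same way.
      const { approvalId } = (event as unknown) as { approvalId: string };
      await approveExecution(approvalId);
    }
  }
}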

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useQuery } from 'react-query';

View File

@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation } from 'react-query';
@@ -12,198 +12,18 @@ import type {
} from 'react-query';
import type {
InframonitoringtypesPostableClustersDTO,
InframonitoringtypesPostableDeploymentsDTO,
InframonitoringtypesPostableHostsDTO,
InframonitoringtypesPostableJobsDTO,
InframonitoringtypesPostableNamespacesDTO,
InframonitoringtypesPostableNodesDTO,
InframonitoringtypesPostablePodsDTO,
InframonitoringtypesPostableStatefulSetsDTO,
InframonitoringtypesPostableVolumesDTO,
ListClusters200,
ListDeployments200,
ListHosts200,
ListJobs200,
ListNamespaces200,
ListNodes200,
ListPods200,
ListStatefulSets200,
ListVolumes200,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type { ErrorType, BodyType } from '../../../generatedAPIInstance';
/**
* Returns a paginated list of Kubernetes clusters with key aggregated metrics derived by summing per-node values within the group: CPU usage, CPU allocatable, memory working set, memory allocatable. Each row also reports per-group nodeCountsByReadiness ({ ready, notReady } from each node's latest k8s.node.condition_ready value) and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each cluster includes metadata attributes (k8s.cluster.name). The response type is 'list' for the default k8s.cluster.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates nodes and pods in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (clusterCPU, clusterCPUAllocatable, clusterMemory, clusterMemoryAllocatable) return -1 as a sentinel when no data is available for that field.
* @summary List Clusters for Infra Monitoring
*/
export const listClusters = (
inframonitoringtypesPostableClustersDTO: BodyType<InframonitoringtypesPostableClustersDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListClusters200>({
url: `/api/v2/infra_monitoring/clusters`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableClustersDTO,
signal,
});
};
export const getListClustersMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listClusters>>,
TError,
{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listClusters>>,
TError,
{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
TContext
> => {
const mutationKey = ['listClusters'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listClusters>>,
{ data: BodyType<InframonitoringtypesPostableClustersDTO> }
> = (props) => {
const { data } = props ?? {};
return listClusters(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListClustersMutationResult = NonNullable<
Awaited<ReturnType<typeof listClusters>>
>;
export type ListClustersMutationBody =
BodyType<InframonitoringtypesPostableClustersDTO>;
export type ListClustersMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Clusters for Infra Monitoring
*/
export const useListClusters = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listClusters>>,
TError,
{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listClusters>>,
TError,
{ data: BodyType<InframonitoringtypesPostableClustersDTO> },
TContext
> => {
const mutationOptions = getListClustersMutationOptions(options);
return useMutation(mutationOptions);
};
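A minimal sketch (not from the repository) of consuming the generated hook above in a component. Only the mutation variables shape ({ data }) and the react-query v3 result fields come from the code; the import paths, the useClusterList name, and the payload contents are assumptions:

import { useListClusters } from 'api/generated/infraMonitoring'; // assumed path
import type { InframonitoringtypesPostableClustersDTO } from 'api/generated/sigNoz.schemas'; // assumed path

function useClusterList(payload: InframonitoringtypesPostableClustersDTO) {
  const { mutate, data, isLoading, error } = useListClusters();
  // The variables object matches the generated mutationFn signature: { data: <DTO> }.
  const refresh = (): void => mutate({ data: payload });
  return { clusters: data, isLoading, error, refresh };
}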
/**
* Returns a paginated list of Kubernetes Deployments with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the deployment, plus average CPU/memory request and limit utilization (deploymentCPURequest, deploymentCPULimit, deploymentMemoryRequest, deploymentMemoryLimit). Each row also reports the latest known desiredPods (k8s.deployment.desired) and availablePods (k8s.deployment.available) replica counts and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each deployment includes metadata attributes (k8s.deployment.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.deployment.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by deployments in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_pods / available_pods, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (deploymentCPU, deploymentCPURequest, deploymentCPULimit, deploymentMemory, deploymentMemoryRequest, deploymentMemoryLimit, desiredPods, availablePods) return -1 as a sentinel when no data is available for that field.
* @summary List Deployments for Infra Monitoring
*/
export const listDeployments = (
inframonitoringtypesPostableDeploymentsDTO: BodyType<InframonitoringtypesPostableDeploymentsDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListDeployments200>({
url: `/api/v2/infra_monitoring/deployments`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableDeploymentsDTO,
signal,
});
};
export const getListDeploymentsMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listDeployments>>,
TError,
{ data: BodyType<InframonitoringtypesPostableDeploymentsDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listDeployments>>,
TError,
{ data: BodyType<InframonitoringtypesPostableDeploymentsDTO> },
TContext
> => {
const mutationKey = ['listDeployments'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listDeployments>>,
{ data: BodyType<InframonitoringtypesPostableDeploymentsDTO> }
> = (props) => {
const { data } = props ?? {};
return listDeployments(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListDeploymentsMutationResult = NonNullable<
Awaited<ReturnType<typeof listDeployments>>
>;
export type ListDeploymentsMutationBody =
BodyType<InframonitoringtypesPostableDeploymentsDTO>;
export type ListDeploymentsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Deployments for Infra Monitoring
*/
export const useListDeployments = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listDeployments>>,
TError,
{ data: BodyType<InframonitoringtypesPostableDeploymentsDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listDeployments>>,
TError,
{ data: BodyType<InframonitoringtypesPostableDeploymentsDTO> },
TContext
> => {
const mutationOptions = getListDeploymentsMutationOptions(options);
return useMutation(mutationOptions);
};
/**
* Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field.
* @summary List Hosts for Infra Monitoring
@@ -288,174 +108,6 @@ export const useListHosts = <
return useMutation(mutationOptions);
};
/**
* Returns a paginated list of Kubernetes Jobs with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the job, plus average CPU/memory request and limit utilization (jobCPURequest, jobCPULimit, jobMemoryRequest, jobMemoryLimit). Each row also reports the latest known job-level counters from kube-state-metrics: desiredSuccessfulPods (k8s.job.desired_successful_pods, the target completion count), activePods (k8s.job.active_pods), failedPods (k8s.job.failed_pods, cumulative across the lifetime of the job), and successfulPods (k8s.job.successful_pods, cumulative). It also reports per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value); note podCountsByPhase.failed (current pod-phase) is distinct from failedPods (cumulative job kube-state-metric). Each job includes metadata attributes (k8s.job.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.job.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by jobs in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_successful_pods / active_pods / failed_pods / successful_pods, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (jobCPU, jobCPURequest, jobCPULimit, jobMemory, jobMemoryRequest, jobMemoryLimit, desiredSuccessfulPods, activePods, failedPods, successfulPods) return -1 as a sentinel when no data is available for that field.
* @summary List Jobs for Infra Monitoring
*/
export const listJobs = (
inframonitoringtypesPostableJobsDTO: BodyType<InframonitoringtypesPostableJobsDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListJobs200>({
url: `/api/v2/infra_monitoring/jobs`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableJobsDTO,
signal,
});
};
export const getListJobsMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listJobs>>,
TError,
{ data: BodyType<InframonitoringtypesPostableJobsDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listJobs>>,
TError,
{ data: BodyType<InframonitoringtypesPostableJobsDTO> },
TContext
> => {
const mutationKey = ['listJobs'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listJobs>>,
{ data: BodyType<InframonitoringtypesPostableJobsDTO> }
> = (props) => {
const { data } = props ?? {};
return listJobs(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListJobsMutationResult = NonNullable<
Awaited<ReturnType<typeof listJobs>>
>;
export type ListJobsMutationBody =
BodyType<InframonitoringtypesPostableJobsDTO>;
export type ListJobsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Jobs for Infra Monitoring
*/
export const useListJobs = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listJobs>>,
TError,
{ data: BodyType<InframonitoringtypesPostableJobsDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listJobs>>,
TError,
{ data: BodyType<InframonitoringtypesPostableJobsDTO> },
TContext
> => {
const mutationOptions = getListJobsMutationOptions(options);
return useMutation(mutationOptions);
};
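// --- Illustrative sketch, not part of the generated file: a minimal way to drive the useListJobs
// mutation hook from application code. The one-hour window, limit and console logging are
// assumptions for the example; the body shape follows InframonitoringtypesPostableJobsDTO, and
// millisecond epoch timestamps are assumed for start/end. podCountsByPhase fields follow the
// { pending, running, succeeded, failed, unknown } shape given in the endpoint description.
export const useJobsListExample = () => {
  const listJobsMutation = useListJobs();
  const loadLastHour = (): void => {
    const end = Date.now();
    listJobsMutation.mutate(
      { data: { start: end - 60 * 60 * 1000, end, limit: 10, offset: 0 } },
      {
        onSuccess: (response) => {
          // records is null when nothing matched; -1 metric values mean "no data for that field".
          // failedPods is the cumulative kube-state-metric, podCountsByPhase.failed the current phase count.
          response.data.records?.forEach((job) => {
            console.log(job.jobName, job.jobCPU, job.failedPods, job.podCountsByPhase.failed);
          });
        },
      },
    );
  };
  return { loadLastHour, isLoading: listJobsMutation.isLoading };
};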
/**
* Returns a paginated list of Kubernetes namespaces with key aggregated pod metrics: CPU usage and memory working set (summed across pods in the group), plus per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value in the window). Each namespace includes metadata attributes (k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.namespace.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / memory, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (namespaceCPU, namespaceMemory) return -1 as a sentinel when no data is available for that field.
* @summary List Namespaces for Infra Monitoring
*/
export const listNamespaces = (
inframonitoringtypesPostableNamespacesDTO: BodyType<InframonitoringtypesPostableNamespacesDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListNamespaces200>({
url: `/api/v2/infra_monitoring/namespaces`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableNamespacesDTO,
signal,
});
};
export const getListNamespacesMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listNamespaces>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listNamespaces>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
TContext
> => {
const mutationKey = ['listNamespaces'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listNamespaces>>,
{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> }
> = (props) => {
const { data } = props ?? {};
return listNamespaces(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListNamespacesMutationResult = NonNullable<
Awaited<ReturnType<typeof listNamespaces>>
>;
export type ListNamespacesMutationBody =
BodyType<InframonitoringtypesPostableNamespacesDTO>;
export type ListNamespacesMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Namespaces for Infra Monitoring
*/
export const useListNamespaces = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listNamespaces>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listNamespaces>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
TContext
> => {
const mutationOptions = getListNamespacesMutationOptions(options);
return useMutation(mutationOptions);
};
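// --- Illustrative sketch, not part of the generated file: the same mutation pattern driven
// imperatively via mutateAsync, e.g. from a refresh handler. The thirty-minute window and limit
// are assumptions for the example; millisecond epoch timestamps are assumed for start/end.
export const loadNamespacesExample = async (
  namespacesMutation: ReturnType<typeof useListNamespaces>,
): Promise<void> => {
  const end = Date.now();
  const response = await namespacesMutation.mutateAsync({
    data: { start: end - 30 * 60 * 1000, end, limit: 20 },
  });
  // type is 'grouped_list' only when the request carried a custom groupBy; otherwise 'list'.
  console.log(response.data.type, response.data.total);
};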
/**
* Returns a paginated list of Kubernetes nodes with key metrics: CPU usage, CPU allocatable, memory working set, memory allocatable, per-group nodeCountsByReadiness ({ ready, notReady } from each node's latest k8s.node.condition_ready in the window) and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } for pods scheduled on the listed nodes). Each node includes metadata attributes (k8s.node.uid, k8s.cluster.name). The response type is 'list' for the default k8s.node.name grouping (each row is one node with its current condition string: ready / not_ready / no_data) or 'grouped_list' for custom groupBy keys (each row aggregates nodes in the group; condition stays no_data). Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (nodeCPU, nodeCPUAllocatable, nodeMemory, nodeMemoryAllocatable) return -1 as a sentinel when no data is available for that field.
* @summary List Nodes for Infra Monitoring
@@ -624,171 +276,3 @@ export const useListPods = <
return useMutation(mutationOptions);
};
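// --- Illustrative sketch, not part of the generated file: the nodes endpoint described above
// returns a per-row condition (ready / not_ready / no_data) in the default k8s.node.name
// grouping. The full node record type and the useListNodes hook are elided from this excerpt,
// so the parameter below is narrowed to the one field used, and the field name 'condition' is
// an assumption for the example; the enum is the generated InframonitoringtypesNodeConditionDTO.
export const isNodeUnhealthyExample = (row: {
  condition: InframonitoringtypesNodeConditionDTO;
}): boolean => row.condition === InframonitoringtypesNodeConditionDTO.not_ready;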
/**
* Returns a paginated list of Kubernetes persistent volume claims (PVCs) with key volume metrics: available bytes, capacity bytes, usage (capacity - available), inodes, free inodes, and used inodes. Each row also includes metadata attributes (k8s.persistentvolumeclaim.name, k8s.pod.uid, k8s.pod.name, k8s.namespace.name, k8s.node.name, k8s.statefulset.name, k8s.cluster.name). Supports filtering via a filter expression, custom groupBy to aggregate volumes by any attribute, ordering by any of the six metrics (available, capacity, usage, inodes, inodes_free, inodes_used), and pagination via offset/limit. The response type is 'list' for the default k8s.persistentvolumeclaim.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates volumes in the group. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (volumeAvailable, volumeCapacity, volumeUsage, volumeInodes, volumeInodesFree, volumeInodesUsed) return -1 as a sentinel when no data is available for that field.
* @summary List Volumes for Infra Monitoring
*/
export const listVolumes = (
inframonitoringtypesPostableVolumesDTO: BodyType<InframonitoringtypesPostableVolumesDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListVolumes200>({
url: `/api/v2/infra_monitoring/pvcs`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableVolumesDTO,
signal,
});
};
export const getListVolumesMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listVolumes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableVolumesDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listVolumes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableVolumesDTO> },
TContext
> => {
const mutationKey = ['listVolumes'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listVolumes>>,
{ data: BodyType<InframonitoringtypesPostableVolumesDTO> }
> = (props) => {
const { data } = props ?? {};
return listVolumes(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListVolumesMutationResult = NonNullable<
Awaited<ReturnType<typeof listVolumes>>
>;
export type ListVolumesMutationBody =
BodyType<InframonitoringtypesPostableVolumesDTO>;
export type ListVolumesMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Volumes for Infra Monitoring
*/
export const useListVolumes = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listVolumes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableVolumesDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listVolumes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableVolumesDTO> },
TContext
> => {
const mutationOptions = getListVolumesMutationOptions(options);
return useMutation(mutationOptions);
};
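// --- Illustrative sketch, not part of the generated file: deriving a usage percentage from the
// PVC metrics described above while guarding the -1 "no data" sentinel. Importing the record
// type from the generated schema is assumed; the helper name is made up for the example.
export const pvcUsagePercentExample = (
  row: InframonitoringtypesVolumeRecordDTO,
): number | null =>
  row.volumeCapacity > 0 && row.volumeUsage >= 0
    ? (row.volumeUsage / row.volumeCapacity) * 100
    : null;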
/**
* Returns a paginated list of Kubernetes StatefulSets with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the statefulset, plus average CPU/memory request and limit utilization (statefulSetCPURequest, statefulSetCPULimit, statefulSetMemoryRequest, statefulSetMemoryLimit). Each row also reports the latest known desiredPods (k8s.statefulset.desired_pods) and currentPods (k8s.statefulset.current_pods) replica counts and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each statefulset includes metadata attributes (k8s.statefulset.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.statefulset.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by statefulsets in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_pods / current_pods, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (statefulSetCPU, statefulSetCPURequest, statefulSetCPULimit, statefulSetMemory, statefulSetMemoryRequest, statefulSetMemoryLimit, desiredPods, currentPods) return -1 as a sentinel when no data is available for that field.
* @summary List StatefulSets for Infra Monitoring
*/
export const listStatefulSets = (
inframonitoringtypesPostableStatefulSetsDTO: BodyType<InframonitoringtypesPostableStatefulSetsDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListStatefulSets200>({
url: `/api/v2/infra_monitoring/statefulsets`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableStatefulSetsDTO,
signal,
});
};
export const getListStatefulSetsMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listStatefulSets>>,
TError,
{ data: BodyType<InframonitoringtypesPostableStatefulSetsDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listStatefulSets>>,
TError,
{ data: BodyType<InframonitoringtypesPostableStatefulSetsDTO> },
TContext
> => {
const mutationKey = ['listStatefulSets'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listStatefulSets>>,
{ data: BodyType<InframonitoringtypesPostableStatefulSetsDTO> }
> = (props) => {
const { data } = props ?? {};
return listStatefulSets(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListStatefulSetsMutationResult = NonNullable<
Awaited<ReturnType<typeof listStatefulSets>>
>;
export type ListStatefulSetsMutationBody =
BodyType<InframonitoringtypesPostableStatefulSetsDTO>;
export type ListStatefulSetsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List StatefulSets for Infra Monitoring
*/
export const useListStatefulSets = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listStatefulSets>>,
TError,
{ data: BodyType<InframonitoringtypesPostableStatefulSetsDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listStatefulSets>>,
TError,
{ data: BodyType<InframonitoringtypesPostableStatefulSetsDTO> },
TContext
> => {
const mutationOptions = getListStatefulSetsMutationOptions(options);
return useMutation(mutationOptions);
};
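// --- Illustrative sketch, not part of the generated file: flagging statefulsets that have not
// reached their desired replica count, using the desiredPods / currentPods counters described
// above. A -1 value means the counter was not reported in the selected window, so such rows are
// skipped. Importing the record type from the generated schema is assumed.
export const isStatefulSetDegradedExample = (
  row: InframonitoringtypesStatefulSetRecordDTO,
): boolean =>
  row.desiredPods >= 0 && row.currentPods >= 0 && row.currentPods < row.desiredPods;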


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
export interface AlertmanagertypesChannelDTO {
@@ -1840,7 +1840,6 @@ export enum AuthtypesRelationDTO {
list = 'list',
assignee = 'assignee',
attach = 'attach',
detach = 'detach',
}
export interface AuthtypesRoleDTO {
/**
@@ -4162,7 +4161,7 @@ export enum CoretypesTypeDTO {
role = 'role',
organization = 'organization',
metaresource = 'metaresource',
telemetryresource = 'telemetryresource',
metaresources = 'metaresources',
}
export interface DashboardtypesDashboardDTO {
/**
@@ -4569,143 +4568,6 @@ export interface GlobaltypesTokenizerConfigDTO {
enabled?: boolean;
}
/**
* @nullable
*/
export type InframonitoringtypesClusterRecordDTOMeta = {
[key: string]: string;
} | null;
export interface InframonitoringtypesClusterRecordDTO {
/**
* @type number
* @format double
*/
clusterCPU: number;
/**
* @type number
* @format double
*/
clusterCPUAllocatable: number;
/**
* @type number
* @format double
*/
clusterMemory: number;
/**
* @type number
* @format double
*/
clusterMemoryAllocatable: number;
/**
* @type string
*/
clusterName: string;
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesClusterRecordDTOMeta;
nodeCountsByReadiness: InframonitoringtypesNodeCountsByReadinessDTO;
podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
}
export interface InframonitoringtypesClustersDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesClusterRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
/**
* @nullable
*/
export type InframonitoringtypesDeploymentRecordDTOMeta = {
[key: string]: string;
} | null;
export interface InframonitoringtypesDeploymentRecordDTO {
/**
* @type integer
*/
availablePods: number;
/**
* @type number
* @format double
*/
deploymentCPU: number;
/**
* @type number
* @format double
*/
deploymentCPULimit: number;
/**
* @type number
* @format double
*/
deploymentCPURequest: number;
/**
* @type number
* @format double
*/
deploymentMemory: number;
/**
* @type number
* @format double
*/
deploymentMemoryLimit: number;
/**
* @type number
* @format double
*/
deploymentMemoryRequest: number;
/**
* @type string
*/
deploymentName: string;
/**
* @type integer
*/
desiredPods: number;
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesDeploymentRecordDTOMeta;
podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
}
export interface InframonitoringtypesDeploymentsDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesDeploymentRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export interface InframonitoringtypesHostFilterDTO {
/**
* @type string
@@ -4791,140 +4653,6 @@ export interface InframonitoringtypesHostsDTO {
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
/**
* @nullable
*/
export type InframonitoringtypesJobRecordDTOMeta = {
[key: string]: string;
} | null;
export interface InframonitoringtypesJobRecordDTO {
/**
* @type integer
*/
activePods: number;
/**
* @type integer
*/
desiredSuccessfulPods: number;
/**
* @type integer
*/
failedPods: number;
/**
* @type number
* @format double
*/
jobCPU: number;
/**
* @type number
* @format double
*/
jobCPULimit: number;
/**
* @type number
* @format double
*/
jobCPURequest: number;
/**
* @type number
* @format double
*/
jobMemory: number;
/**
* @type number
* @format double
*/
jobMemoryLimit: number;
/**
* @type number
* @format double
*/
jobMemoryRequest: number;
/**
* @type string
*/
jobName: string;
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesJobRecordDTOMeta;
podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
/**
* @type integer
*/
successfulPods: number;
}
export interface InframonitoringtypesJobsDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesJobRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
/**
* @nullable
*/
export type InframonitoringtypesNamespaceRecordDTOMeta = {
[key: string]: string;
} | null;
export interface InframonitoringtypesNamespaceRecordDTO {
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesNamespaceRecordDTOMeta;
/**
* @type number
* @format double
*/
namespaceCPU: number;
/**
* @type number
* @format double
*/
namespaceMemory: number;
/**
* @type string
*/
namespaceName: string;
podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
}
export interface InframonitoringtypesNamespacesDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesNamespaceRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export enum InframonitoringtypesNodeConditionDTO {
ready = 'ready',
not_ready = 'not_ready',
@@ -5108,62 +4836,6 @@ export interface InframonitoringtypesPodsDTO {
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export interface InframonitoringtypesPostableClustersDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostableDeploymentsDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostableHostsDTO {
/**
* @type integer
@@ -5192,62 +4864,6 @@ export interface InframonitoringtypesPostableHostsDTO {
start: number;
}
export interface InframonitoringtypesPostableJobsDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostableNamespacesDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostableNodesDTO {
/**
* @type integer
@@ -5304,62 +4920,6 @@ export interface InframonitoringtypesPostablePodsDTO {
start: number;
}
export interface InframonitoringtypesPostableStatefulSetsDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostableVolumesDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesRequiredMetricsCheckDTO {
/**
* @type array
@@ -5372,151 +4932,6 @@ export enum InframonitoringtypesResponseTypeDTO {
list = 'list',
grouped_list = 'grouped_list',
}
/**
* @nullable
*/
export type InframonitoringtypesStatefulSetRecordDTOMeta = {
[key: string]: string;
} | null;
export interface InframonitoringtypesStatefulSetRecordDTO {
/**
* @type integer
*/
currentPods: number;
/**
* @type integer
*/
desiredPods: number;
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesStatefulSetRecordDTOMeta;
podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
/**
* @type number
* @format double
*/
statefulSetCPU: number;
/**
* @type number
* @format double
*/
statefulSetCPULimit: number;
/**
* @type number
* @format double
*/
statefulSetCPURequest: number;
/**
* @type number
* @format double
*/
statefulSetMemory: number;
/**
* @type number
* @format double
*/
statefulSetMemoryLimit: number;
/**
* @type number
* @format double
*/
statefulSetMemoryRequest: number;
/**
* @type string
*/
statefulSetName: string;
}
export interface InframonitoringtypesStatefulSetsDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesStatefulSetRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
/**
* @nullable
*/
export type InframonitoringtypesVolumeRecordDTOMeta = {
[key: string]: string;
} | null;
export interface InframonitoringtypesVolumeRecordDTO {
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesVolumeRecordDTOMeta;
/**
* @type string
*/
persistentVolumeClaimName: string;
/**
* @type number
* @format double
*/
volumeAvailable: number;
/**
* @type number
* @format double
*/
volumeCapacity: number;
/**
* @type number
* @format double
*/
volumeInodes: number;
/**
* @type number
* @format double
*/
volumeInodesFree: number;
/**
* @type number
* @format double
*/
volumeInodesUsed: number;
/**
* @type number
* @format double
*/
volumeUsage: number;
}
export interface InframonitoringtypesVolumesDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesVolumeRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export interface LlmpricingruletypesGettablePricingRulesDTO {
/**
* @type array
@@ -7899,7 +7314,7 @@ export interface SpantypesPostableSpanMapperDTO {
* @type boolean
*/
enabled?: boolean;
fieldContext: SpantypesFieldContextDTO;
field_context: SpantypesFieldContextDTO;
/**
* @type string
*/
@@ -7907,6 +7322,7 @@ export interface SpantypesPostableSpanMapperDTO {
}
export interface SpantypesPostableSpanMapperGroupDTO {
category: SpantypesSpanMapperGroupCategoryDTO;
condition: SpantypesSpanMapperGroupConditionDTO;
/**
* @type boolean
@@ -7933,7 +7349,7 @@ export interface SpantypesSpanMapperDTO {
* @type boolean
*/
enabled: boolean;
fieldContext: SpantypesFieldContextDTO;
field_context: SpantypesFieldContextDTO;
/**
* @type string
*/
@@ -7966,6 +7382,7 @@ export interface SpantypesSpanMapperConfigDTO {
}
export interface SpantypesSpanMapperGroupDTO {
category: SpantypesSpanMapperGroupCategoryDTO;
condition: SpantypesSpanMapperGroupConditionDTO;
/**
* @type string
@@ -8003,10 +7420,11 @@ export interface SpantypesSpanMapperGroupDTO {
updatedBy?: string;
}
/**
* @nullable
*/
export type SpantypesSpanMapperGroupConditionDTO = {
export interface SpantypesSpanMapperGroupCategoryDTO {
[key: string]: unknown;
}
export interface SpantypesSpanMapperGroupConditionDTO {
/**
* @type array
* @nullable true
@@ -8017,7 +7435,7 @@ export type SpantypesSpanMapperGroupConditionDTO = {
* @nullable true
*/
resource: string[] | null;
} | null;
}
export enum SpantypesSpanMapperOperationDTO {
move = 'move',
@@ -8043,7 +7461,7 @@ export interface SpantypesUpdatableSpanMapperDTO {
* @nullable true
*/
enabled?: boolean | null;
fieldContext?: SpantypesFieldContextDTO;
field_context?: SpantypesFieldContextDTO;
}
export interface SpantypesUpdatableSpanMapperGroupDTO {
@@ -9591,6 +9009,10 @@ export type GetMyServiceAccount200 = {
};
export type ListSpanMapperGroupsParams = {
/**
* @description undefined
*/
category?: SpantypesSpanMapperGroupCategoryDTO;
/**
* @type boolean
* @nullable true
@@ -9825,22 +9247,6 @@ export type Healthz503 = {
status: string;
};
export type ListClusters200 = {
data: InframonitoringtypesClustersDTO;
/**
* @type string
*/
status: string;
};
export type ListDeployments200 = {
data: InframonitoringtypesDeploymentsDTO;
/**
* @type string
*/
status: string;
};
export type ListHosts200 = {
data: InframonitoringtypesHostsDTO;
/**
@@ -9849,22 +9255,6 @@ export type ListHosts200 = {
status: string;
};
export type ListJobs200 = {
data: InframonitoringtypesJobsDTO;
/**
* @type string
*/
status: string;
};
export type ListNamespaces200 = {
data: InframonitoringtypesNamespacesDTO;
/**
* @type string
*/
status: string;
};
export type ListNodes200 = {
data: InframonitoringtypesNodesDTO;
/**
@@ -9881,22 +9271,6 @@ export type ListPods200 = {
status: string;
};
export type ListVolumes200 = {
data: InframonitoringtypesVolumesDTO;
/**
* @type string
*/
status: string;
};
export type ListStatefulSets200 = {
data: InframonitoringtypesStatefulSetsDTO;
/**
* @type string
*/
status: string;
};
export type Livez200 = {
data: FactoryResponseDTO;
/**


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -1,7 +1,7 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'pnpm generate:api'
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';


@@ -4,46 +4,14 @@ import {
interceptorsRequestResponse,
interceptorsResponse,
} from 'api';
import { ENVIRONMENT } from 'constants/env';
import axios, { AxiosError, AxiosRequestConfig } from 'axios';
import { ENVIRONMENT } from 'constants/env';
// generated API Instance
const generatedAPIAxiosInstance = axios.create({
baseURL: ENVIRONMENT.baseURL,
});
let generatedAPIQueryKeyHeaderContext: Record<string, unknown> | undefined;
export const setGeneratedAPIQueryKeyHeaderContext = <THeaders extends object>(
headers?: THeaders,
): void => {
generatedAPIQueryKeyHeaderContext = headers
? { ...(headers as Record<string, unknown>) }
: undefined;
};
const hashHeaderValue = (value: string): string => {
let hash = 0;
for (let index = 0; index < value.length; index += 1) {
hash = (hash * 31 + value.charCodeAt(index)) >>> 0;
}
return hash.toString(16);
};
const mergeHeaderRecord = (
target: Record<string, unknown>,
source: unknown,
): Record<string, unknown> => {
if (!source || typeof source !== 'object') {
return target;
}
return Object.assign(target, source as Record<string, unknown>);
};
export const GeneratedAPIInstance = <T>(
config: AxiosRequestConfig,
): Promise<T> => {
@@ -58,59 +26,5 @@ generatedAPIAxiosInstance.interceptors.response.use(
interceptorRejected,
);
const getDefaultQueryKeyHeaders = (): Record<string, unknown> => {
const defaults = generatedAPIAxiosInstance.defaults
.headers as unknown as Record<string, unknown>;
const headers: Record<string, unknown> = {};
const methodKeys = new Set([
'common',
'delete',
'get',
'head',
'options',
'patch',
'post',
'put',
]);
mergeHeaderRecord(headers, defaults?.common);
mergeHeaderRecord(headers, defaults?.get);
for (const [key, value] of Object.entries(defaults ?? {})) {
if (!methodKeys.has(key)) {
headers[key] = value;
}
}
return headers;
};
export const getGeneratedAPIQueryKeyHeaders = <THeaders extends object>(
headers?: THeaders,
): [{ headers: Record<string, unknown> }] | [] => {
const mergedHeaders = {
...getDefaultQueryKeyHeaders(),
...generatedAPIQueryKeyHeaderContext,
...(headers as Record<string, unknown> | undefined),
};
const queryKeyHeaders = Object.fromEntries(
Object.entries(mergedHeaders)
.filter(([, value]) => value !== undefined)
.sort(([left], [right]) => left.localeCompare(right))
.map(([key, value]) => {
if (key.toLowerCase() === 'authorization' && typeof value === 'string') {
return [key, hashHeaderValue(value)];
}
return [key, value];
}),
);
return Object.keys(queryKeyHeaders).length
? [{ headers: queryKeyHeaders }]
: [];
};
export type ErrorType<Error> = AxiosError<Error>;
export type BodyType<BodyData> = BodyData;


@@ -40,7 +40,6 @@ const getTraceV3 = async (
const spans: SpanV3[] = (rawPayload.spans || []).map((span: any) => ({
...span,
'service.name': span.resource?.['service.name'] || '',
timestamp: span.time_unix,
}));
// V3 API returns startTimestampMillis/endTimestampMillis as relative durations (ms from epoch offset),


@@ -437,16 +437,11 @@ export function convertTraceOperatorToV5(
panelType,
);
// Skip aggregation for raw request type. Force dataSource to traces so
// createAggregation never takes the metrics branch (which would emit a
// metricName field the backend rejects for trace operators).
// Skip aggregation for raw request type
const aggregations =
requestType === 'raw'
? undefined
: createAggregation(
{ ...traceOperatorData, dataSource: DataSource.TRACES },
panelType,
);
: createAggregation(traceOperatorData, panelType);
const spec: QueryEnvelope['spec'] = {
name: queryName,


@@ -1,5 +1,5 @@
import ErrorContent from 'components/ErrorModal/components/ErrorContent';
import { CircleAlert } from '@signozhq/icons';
import { CircleAlert } from 'lucide-react';
import APIError from 'types/api/error';
import './AuthError.styles.scss';


@@ -1,5 +1,5 @@
import React from 'react';
import { ArrowUpRight } from '@signozhq/icons';
import { ArrowUpRight } from 'lucide-react';
import './AuthFooter.styles.scss';


@@ -1,6 +1,6 @@
import { useCallback } from 'react';
import { Button } from '@signozhq/ui/button';
import { LifeBuoy } from '@signozhq/icons';
import { LifeBuoy } from 'lucide-react';
import signozBrandLogoUrl from '@/assets/Logos/signoz-brand-logo.svg';


@@ -2,11 +2,10 @@ import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useMutation } from 'react-query';
// eslint-disable-next-line no-restricted-imports
import { useSelector } from 'react-redux';
import { Loader, Search } from '@signozhq/icons';
import { LoadingOutlined, SearchOutlined } from '@ant-design/icons';
import { Color } from '@signozhq/design-tokens';
import {
Button,
Flex,
Input,
InputRef,
Progress,
@@ -108,11 +107,9 @@ const getColumnSearchProps = (
type="primary"
size="small"
onClick={(): void => handleSearch(selectedKeys as string[], confirm)}
icon={<SearchOutlined />}
>
<Flex align="center" gap={4}>
<Search size="md" />
Search
</Flex>
Search
</Button>
<Button
onClick={(): void => clearFilters && handleReset(clearFilters, confirm)}
@@ -134,9 +131,8 @@ const getColumnSearchProps = (
</div>
),
filterIcon: (filtered: boolean): JSX.Element => (
<Search
<SearchOutlined
style={{ color: filtered ? Color.BG_ROBIN_500 : undefined }}
size="md"
/>
),
onFilter: (value, record): boolean =>
@@ -520,9 +516,7 @@ export default function CeleryOverviewTable({
bordered={false}
loading={{
spinning: isLoading,
indicator: (
<Spin indicator={<Loader size={14} className="animate-spin" />} />
),
indicator: <Spin indicator={<LoadingOutlined size={14} spin />} />,
}}
locale={{
emptyText: isLoading ? null : <Typography.Text>No data</Typography.Text>,


@@ -6,7 +6,7 @@ import logEvent from 'api/common/logEvent';
import { PANEL_TYPES } from 'constants/queryBuilder';
import dayjs from 'dayjs';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { X } from '@signozhq/icons';
import { X } from 'lucide-react';
import { Widgets } from 'types/api/dashboard/getAll';
import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { DataSource } from 'types/common/queryBuilder';


@@ -6,7 +6,7 @@ import { Typography } from '@signozhq/ui/typography';
import logEvent from 'api/common/logEvent';
import { CardContainer } from 'container/GridCardLayout/styles';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { ChevronDown, ChevronUp } from '@signozhq/icons';
import { ChevronDown, ChevronUp } from 'lucide-react';
import { AppState } from 'store/reducers';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';


@@ -1,6 +1,6 @@
import { Color } from '@signozhq/design-tokens';
import cx from 'classnames';
import { ArrowDown, ArrowUp } from '@signozhq/icons';
import { ArrowDown, ArrowUp } from 'lucide-react';
import './ChangePercentagePill.styles.scss';


@@ -1,12 +1,13 @@
import { useCallback, useEffect, useRef, useState } from 'react';
import { useMutation } from 'react-query';
import { Check, ChevronsDown, ScrollText, X } from '@signozhq/icons';
import { Button, Flex, Modal } from 'antd';
import { CheckOutlined, CloseOutlined } from '@ant-design/icons';
import { Button, Modal } from 'antd';
import updateUserPreference from 'api/v1/user/preferences/name/update';
import cx from 'classnames';
import { USER_PREFERENCES } from 'constants/userPreferences';
import dayjs from 'dayjs';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import { ChevronsDown, ScrollText } from 'lucide-react';
import { useAppContext } from 'providers/App/App';
import { ChangelogSchema } from 'types/api/changelog/getChangelogByVersion';
import { UserPreference } from 'types/api/preferences/preference';
@@ -115,17 +116,15 @@ function ChangelogModal({ changelog, onClose }: Props): JSX.Element {
>
{!isCloudUser && (
<div className="changelog-modal-footer-ctas">
<Button type="default" onClick={onClose}>
<Flex align="center" gap="4px">
<X size="md" />
Skip for now
</Flex>
<Button type="default" icon={<CloseOutlined />} onClick={onClose}>
Skip for now
</Button>
<Button type="primary" onClick={onClickUpdateWorkspace}>
<Flex align="center" gap="4px">
<Check size="md" />
Update my workspace
</Flex>
<Button
type="primary"
icon={<CheckOutlined />}
onClick={onClickUpdateWorkspace}
>
Update my workspace
</Button>
</div>
)}


@@ -6,7 +6,7 @@ import { Typography } from '@signozhq/ui/typography';
import logEvent from 'api/common/logEvent';
import updateCreditCardApi from 'api/v1/checkout/create';
import { useNotifications } from 'hooks/useNotifications';
import { CreditCard, MessageSquareText, X } from '@signozhq/icons';
import { CreditCard, MessageSquareText, X } from 'lucide-react';
import { SuccessResponseV2 } from 'types/api';
import { CheckoutSuccessPayloadProps } from 'types/api/billing/checkout';
import APIError from 'types/api/error';


@@ -37,7 +37,7 @@ import { validationMapper } from 'hooks/queryBuilder/useIsValidTag';
import { operatorTypeMapper } from 'hooks/queryBuilder/useOperatorType';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { isArray, isEmpty, isEqual, isObject } from 'lodash-es';
import { ChevronDown, ChevronUp } from '@signozhq/icons';
import { ChevronDown, ChevronUp } from 'lucide-react';
import type { BaseSelectRef } from 'rc-select';
import {
BaseAutocompleteData,

Some files were not shown because too many files have changed in this diff.