Compare commits

..

31 Commits

Author SHA1 Message Date
Karan Balani
eb661b7ac7 Merge branch 'main' into feat/billing-meterreporter 2026-04-30 14:59:59 +05:30
Karan Balani
afd6868423 Merge branch 'feat/billing-meterreporter' of github.com:SigNoz/signoz into feat/billing-meterreporter 2026-04-30 14:57:57 +05:30
Karan Balani
8ddf0a13c1 feat: make retention buckets generic 2026-04-30 14:20:44 +05:30
Nityananda Gohain
755390c4b5 feat: types and handler for llm pricing rules (#10908)
Some checks failed
build-staging / prepare (push) Has been cancelled
build-staging / js-build (push) Has been cancelled
build-staging / go-build (push) Has been cancelled
build-staging / staging (push) Has been cancelled
Release Drafter / update_release_draft (push) Has been cancelled
* feat: 1. Types for ai-o11y pricing rules

* fix: changes

* fix: minor changes

* fix: more changes

* fix: new updates

* fix: address comments

* fix: remove nullable

* fix: types

* fix: address comments

* fix: use mustnewuuid

* fix: correct table name

* fix: address comments and move pricing to a single struct

* fix: linting issues
2026-04-30 05:44:12 +00:00
SagarRajput-7
adbd89aae9 fix(platform): fix semantic tokens and component upgrade issue in platform surfaces (#11142)
* fix(platform): fix semantic tokens and component upgrade issue in platform surfaces

* fix: updated signozhq/ui version
2026-04-30 00:31:33 +00:00
primus-bot[bot]
b71de5b561 chore(release): bump to v0.121.0 (#11139)
Some checks failed
build-staging / prepare (push) Has been cancelled
build-staging / js-build (push) Has been cancelled
build-staging / go-build (push) Has been cancelled
build-staging / staging (push) Has been cancelled
Release Drafter / update_release_draft (push) Has been cancelled
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2026-04-29 16:08:15 +00:00
Piyush Singariya
a672335a33 fix: Body Search warning with FTS in JSON Logs (#10807)
Some checks failed
build-staging / prepare (push) Has been cancelled
build-staging / js-build (push) Has been cancelled
build-staging / go-build (push) Has been cancelled
build-staging / staging (push) Has been cancelled
Release Drafter / update_release_draft (push) Has been cancelled
* fix: fts warning miss in direct text search

* fix: comments

* test: added one more test variation

* ci: go lint

* fix: fts warning update

* fix: integration tests

* fix: go test and fmtlint
2026-04-29 08:50:28 +00:00
Karan Balani
16f0d2aa38 Merge branch 'main' into feat/billing-meterreporter 2026-04-29 13:44:24 +05:30
Karan Balani
3af912c586 chore: add tracing and logging 2026-04-29 13:28:53 +05:30
Karan Balani
ad7715802b refactor: push meters in batch for each day 2026-04-29 12:43:42 +05:30
Karan Balani
b579bdbd7b refactor: simplify some sections of tick 2026-04-29 11:32:57 +05:30
Karan Balani
aa64cf7bbf refactor: move few things to ee package 2026-04-29 10:40:48 +05:30
Karan Balani
2d33b1a743 refactor: remove HistoricalBackfillDays 2026-04-29 03:54:18 +05:30
Karan Balani
4fbf7de8e1 refactor: cleanup comments 2026-04-29 03:31:58 +05:30
Karan Balani
7528b19fd4 Merge branch 'main' into feat/billing-meterreporter 2026-04-29 01:56:01 +05:30
Karan Balani
42e4196aad feat(meterreporter): add metric and trace meters 2026-04-29 00:35:52 +05:30
Karan Balani
22cdb03702 chore: intermediate commit 2026-04-28 21:30:10 +05:30
Karan Balani
6eca3dc06e refactor: add retentiontypes 2026-04-28 21:21:08 +05:30
Karan Balani
0631189417 refactor(meterreporter): remove unused retry config 2026-04-28 20:32:19 +05:30
Karan Balani
ec552b94cc fix(meterreporter): pin retention type 2026-04-28 18:49:35 +05:30
Karan Balani
ee8d99f1d0 chore: lower HistoricalBackfillDays 2026-04-28 17:51:16 +05:30
Karan Balani
bf77e26a86 feat(meterreporter): bootstrap from data floor, emit sentinel zero-readings 2026-04-28 17:26:31 +05:30
Karan Balani
9cd3cf23d7 chore: skip meter checkpoint call temporarily 2026-04-28 16:25:45 +05:30
Karan Balani
4a44802ebc feat: improve retention period queries based on workspace ids for logs only for now 2026-04-28 13:30:44 +05:30
Karan Balani
f2aed0d834 chore: intermediate commit 2026-04-28 13:30:44 +05:30
Karan Balani
527d8c0459 feat(meterreporter): sealed-range catch-up and today-partial ticks 2026-04-28 13:30:44 +05:30
Karan Balani
8fdc91260e feat: add telemetry for collect and ship durations & improve comments 2026-04-28 13:30:44 +05:30
Karan Balani
218c4524b1 chore: update interval validation to allow min 5 mins interval for testing 2026-04-28 13:30:44 +05:30
Karan Balani
02dec846eb feat(meterreporter): add traces meters 2026-04-28 13:30:44 +05:30
Karan Balani
99dadb7247 feat(meterreporter): simplify code, add metric meters, dry-run zeus call 2026-04-28 13:30:44 +05:30
Karan Balani
44b41c40de feat: meter reporter for new billing infra 2026-04-28 13:30:41 +05:30
74 changed files with 4023 additions and 499 deletions

View File

@@ -23,6 +23,7 @@ import (
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
@@ -109,6 +110,9 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
func(_ licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]] {
return signoz.NewAuditorProviderFactories()
},
func(_ licensing.Licensing, _ telemetrystore.TelemetryStore, _ sqlstore.SQLStore, _ organization.Getter, _ zeus.Zeus) factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]] {
return signoz.NewMeterReporterProviderFactories()
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
return querier.NewHandler(ps, q, a)
},

View File

@@ -17,6 +17,7 @@ import (
"github.com/SigNoz/signoz/ee/gateway/httpgateway"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/meterreporter/signozmeterreporter"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration/implcloudprovider"
"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
@@ -38,6 +39,7 @@ import (
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
pkgcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
@@ -157,6 +159,13 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
}
return factories
},
func(licensing licensing.Licensing, telemetryStore telemetrystore.TelemetryStore, sqlStore sqlstore.SQLStore, orgGetter organization.Getter, zeus zeus.Zeus) factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]] {
factories := signoz.NewMeterReporterProviderFactories()
if err := factories.Add(signozmeterreporter.NewFactory(licensing, telemetryStore, sqlStore, orgGetter, zeus)); err != nil {
panic(err)
}
return factories
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
communityHandler := querier.NewHandler(ps, q, a)
return eequerier.NewHandler(ps, q, communityHandler)

View File

@@ -427,3 +427,14 @@ authz:
openfga:
# maximum tuples allowed per openfga write operation.
max_tuples_per_write: 100
##################### Meter Reporter #####################
meterreporter:
# Specifies the meter reporter provider to use.
# noop: does not report any meters (community default).
# signoz: periodically queries meters via the querier and ships readings to Zeus (enterprise).
provider: noop
# The interval between collection ticks. Minimum 30m.
interval: 6h
# The per-tick timeout that bounds collect-and-ship work.
timeout: 30s

View File

@@ -190,7 +190,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.120.0
image: signoz/signoz:v0.121.0
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port

View File

@@ -117,7 +117,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.120.0
image: signoz/signoz:v0.121.0
ports:
- "8080:8080" # signoz port
volumes:

View File

@@ -181,7 +181,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.120.0}
image: signoz/signoz:${VERSION:-v0.121.0}
container_name: signoz
ports:
- "8080:8080" # signoz port

View File

@@ -109,7 +109,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.120.0}
image: signoz/signoz:${VERSION:-v0.121.0}
container_name: signoz
ports:
- "8080:8080" # signoz port

View File

@@ -2632,6 +2632,158 @@ components:
- list
- grouped_list
type: string
LlmpricingruletypesGettablePricingRules:
properties:
items:
items:
$ref: '#/components/schemas/LlmpricingruletypesLLMPricingRule'
nullable: true
type: array
limit:
type: integer
offset:
type: integer
total:
type: integer
required:
- items
- total
- offset
- limit
type: object
LlmpricingruletypesLLMPricingCacheCosts:
properties:
mode:
$ref: '#/components/schemas/LlmpricingruletypesLLMPricingRuleCacheMode'
read:
format: double
type: number
write:
format: double
type: number
required:
- mode
type: object
LlmpricingruletypesLLMPricingRule:
properties:
createdAt:
format: date-time
type: string
createdBy:
type: string
enabled:
type: boolean
id:
type: string
isOverride:
type: boolean
modelName:
type: string
modelPattern:
$ref: '#/components/schemas/LlmpricingruletypesStringSlice'
orgId:
type: string
pricing:
$ref: '#/components/schemas/LlmpricingruletypesLLMRulePricing'
provider:
type: string
sourceId:
type: string
syncedAt:
format: date-time
nullable: true
type: string
unit:
$ref: '#/components/schemas/LlmpricingruletypesLLMPricingRuleUnit'
updatedAt:
format: date-time
type: string
updatedBy:
type: string
required:
- id
- orgId
- modelName
- provider
- modelPattern
- unit
- pricing
- isOverride
- enabled
type: object
LlmpricingruletypesLLMPricingRuleCacheMode:
enum:
- subtract
- additive
- unknown
type: string
LlmpricingruletypesLLMPricingRuleUnit:
enum:
- per_million_tokens
type: string
LlmpricingruletypesLLMRulePricing:
properties:
cache:
$ref: '#/components/schemas/LlmpricingruletypesLLMPricingCacheCosts'
input:
format: double
type: number
output:
format: double
type: number
required:
- input
- output
type: object
LlmpricingruletypesStringSlice:
items:
type: string
nullable: true
type: array
LlmpricingruletypesUpdatableLLMPricingRule:
properties:
enabled:
type: boolean
id:
nullable: true
type: string
isOverride:
nullable: true
type: boolean
modelName:
type: string
modelPattern:
items:
type: string
nullable: true
type: array
pricing:
$ref: '#/components/schemas/LlmpricingruletypesLLMRulePricing'
provider:
type: string
sourceId:
nullable: true
type: string
unit:
$ref: '#/components/schemas/LlmpricingruletypesLLMPricingRuleUnit'
required:
- modelName
- provider
- modelPattern
- unit
- pricing
- enabled
type: object
LlmpricingruletypesUpdatableLLMPricingRules:
properties:
rules:
items:
$ref: '#/components/schemas/LlmpricingruletypesUpdatableLLMPricingRule'
nullable: true
type: array
required:
- rules
type: object
MetricsexplorertypesInspectMetricsRequest:
properties:
end:
@@ -7675,6 +7827,218 @@ paths:
summary: Create bulk invite
tags:
- users
/api/v1/llm_pricing_rules:
get:
deprecated: false
description: Returns all LLM pricing rules for the authenticated org, with pagination.
operationId: ListLLMPricingRules
parameters:
- in: query
name: offset
schema:
type: integer
- in: query
name: limit
schema:
type: integer
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/LlmpricingruletypesGettablePricingRules'
status:
type: string
required:
- status
- data
type: object
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: List pricing rules
tags:
- llmpricingrules
put:
deprecated: false
description: Single write endpoint used by both the user and the Zeus sync job.
Per-rule match is by id, then sourceId, then insert. Override rows (is_override=true)
are fully preserved when the request does not provide isOverride; only synced_at
is stamped.
operationId: CreateOrUpdateLLMPricingRules
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/LlmpricingruletypesUpdatableLLMPricingRules'
responses:
"204":
description: No Content
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- ADMIN
- tokenizer:
- ADMIN
summary: Create or update pricing rules
tags:
- llmpricingrules
/api/v1/llm_pricing_rules/{id}:
delete:
deprecated: false
description: Hard-deletes a pricing rule. If auto-synced, it will be recreated
on the next sync cycle.
operationId: DeleteLLMPricingRule
parameters:
- in: path
name: id
required: true
schema:
type: string
responses:
"204":
description: No Content
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"404":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Not Found
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- ADMIN
- tokenizer:
- ADMIN
summary: Delete a pricing rule
tags:
- llmpricingrules
get:
deprecated: false
description: Returns a single LLM pricing rule by ID.
operationId: GetLLMPricingRule
parameters:
- in: path
name: id
required: true
schema:
type: string
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/LlmpricingruletypesLLMPricingRule'
status:
type: string
required:
- status
- data
type: object
description: OK
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"404":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Not Found
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: Get a pricing rule
tags:
- llmpricingrules
/api/v1/logs/promote_paths:
get:
deprecated: false

View File

@@ -0,0 +1,291 @@
package signozmeterreporter
import (
"context"
"fmt"
"sort"
"strconv"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/telemetrymeter"
"github.com/SigNoz/signoz/pkg/types/meterreportertypes"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/huandu/go-sqlbuilder"
)
// retentionDimensionColumn pairs a raw label key (as stored in the meter
// samples' labels JSON) with the SQL alias it is selected under in the
// retention query.
type retentionDimensionColumn struct {
	key   string
	alias string
}

// retentionReadingBucket accumulates the summed meter value for one unique
// dimension set while rows from multiple retention slices are merged.
type retentionReadingBucket struct {
	dimensions map[string]string
	value      float64
}
// CollectLogCountMeter collects log-count readings sliced by the org's log
// retention configuration for the window.
func CollectLogCountMeter(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error) {
	return collectMeterSamplesByRetention(ctx, deps, meter, orgID, window, RetentionDomainLogs)
}

// CollectLogSizeMeter collects log-size readings sliced by the org's log
// retention configuration for the window.
func CollectLogSizeMeter(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error) {
	return collectMeterSamplesByRetention(ctx, deps, meter, orgID, window, RetentionDomainLogs)
}

// CollectMetricDatapointCountMeter collects metric datapoint-count readings
// sliced by the org's metrics retention configuration for the window.
func CollectMetricDatapointCountMeter(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error) {
	return collectMeterSamplesByRetention(ctx, deps, meter, orgID, window, RetentionDomainMetrics)
}

// CollectMetricDatapointSizeMeter collects metric datapoint-size readings
// sliced by the org's metrics retention configuration for the window.
func CollectMetricDatapointSizeMeter(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error) {
	return collectMeterSamplesByRetention(ctx, deps, meter, orgID, window, RetentionDomainMetrics)
}

// CollectSpanCountMeter collects span-count readings sliced by the org's
// traces retention configuration for the window.
func CollectSpanCountMeter(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error) {
	return collectMeterSamplesByRetention(ctx, deps, meter, orgID, window, RetentionDomainTraces)
}

// CollectSpanSizeMeter collects span-size readings sliced by the org's
// traces retention configuration for the window.
func CollectSpanSizeMeter(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error) {
	return collectMeterSamplesByRetention(ctx, deps, meter, orgID, window, RetentionDomainTraces)
}
// collectMeterSamplesByRetention sums meter samples over the window, grouped
// by the org's retention configuration for the given domain. Each active
// retention slice gets its own ClickHouse query; rows from all slices are
// merged into one reading per unique dimension set. If the window has slices
// but no data, a single zero-value sentinel reading is emitted instead.
func collectMeterSamplesByRetention(
	ctx context.Context,
	deps CollectorDeps,
	meter Meter,
	orgID valuer.UUID,
	window Window,
	domain RetentionDomain,
) ([]meterreportertypes.Reading, error) {
	if deps.TelemetryStore == nil {
		return nil, errors.New(errors.TypeInternal, errCodeReportFailed, "telemetry store is nil")
	}
	if deps.SQLStore == nil {
		return nil, errors.New(errors.TypeInternal, errCodeReportFailed, "sql store is nil")
	}
	meterName := meter.Name.String()
	slices, err := loadActiveRetentionSlices(ctx, deps.SQLStore, orgID, domain, window.StartUnixMilli, window.EndUnixMilli)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "load retention slices for meter %q", meterName)
	}
	// Buckets are keyed by a canonical encoding of the dimension map so that
	// values from different slices with identical dimensions sum together.
	accumulator := make(map[string]*retentionReadingBucket)
	for _, slice := range slices {
		query, args, dimensionColumns, err := buildMeterRetentionQuery(meterName, slice)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "build retention query for meter %q", meterName)
		}
		rows, err := deps.TelemetryStore.ClickhouseDB().Query(ctx, query, args...)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "query meter %q slice [%d, %d)", meterName, slice.StartMs, slice.EndMs)
		}
		// The closure scopes the defer so each slice's rows are closed before
		// the next query runs, rather than piling up until function return.
		if err := func() error {
			defer rows.Close()
			for rows.Next() {
				dimensionValues := make([]string, len(dimensionColumns))
				var retentionDays int32
				var retentionRuleIndex int32
				var value float64
				// Scan destinations mirror the SELECT order built by
				// buildMeterRetentionQuery: dimensions first, then
				// retention_days, retention_rule_index, value.
				scanDest := make([]any, 0, len(dimensionValues)+3)
				for i := range dimensionValues {
					scanDest = append(scanDest, &dimensionValues[i])
				}
				scanDest = append(scanDest, &retentionDays, &retentionRuleIndex, &value)
				if err := rows.Scan(scanDest...); err != nil {
					return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "scan meter %q slice [%d, %d)", meterName, slice.StartMs, slice.EndMs)
				}
				dimensions, err := retentionReadingDimensions(orgID, int(retentionDays), int(retentionRuleIndex), dimensionColumns, dimensionValues, slice.Rules)
				if err != nil {
					return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "build dimensions for meter %q slice [%d, %d)", meterName, slice.StartMs, slice.EndMs)
				}
				key := retentionReadingBucketKey(dimensions)
				bucket, ok := accumulator[key]
				if !ok {
					bucket = &retentionReadingBucket{dimensions: dimensions}
					accumulator[key] = bucket
				}
				bucket.value += value
			}
			if err := rows.Err(); err != nil {
				return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "iterate meter %q slice [%d, %d)", meterName, slice.StartMs, slice.EndMs)
			}
			return nil
		}(); err != nil {
			return nil, err
		}
	}
	readings := make([]meterreportertypes.Reading, 0, len(accumulator))
	for _, bucket := range accumulator {
		readings = append(readings, meterreportertypes.Reading{
			MeterName:      meterName,
			Value:          bucket.value,
			Unit:           meter.Unit,
			Aggregation:    meter.Aggregation,
			StartUnixMilli: window.StartUnixMilli,
			EndUnixMilli:   window.EndUnixMilli,
			IsCompleted:    window.IsCompleted,
			Dimensions:     bucket.dimensions,
		})
	}
	// Zero usage is itself a billing event; the sentinel also lets Zeus's
	// MAX(start_date) checkpoint advance past genuinely empty days.
	if len(readings) == 0 && len(slices) > 0 {
		readings = append(readings, meterreportertypes.Reading{
			MeterName:      meterName,
			Value:          0,
			Unit:           meter.Unit,
			Aggregation:    meter.Aggregation,
			StartUnixMilli: window.StartUnixMilli,
			EndUnixMilli:   window.EndUnixMilli,
			IsCompleted:    window.IsCompleted,
			Dimensions: map[string]string{
				dimensionOrganizationID: orgID.StringValue(),
				// The sentinel carries the last slice's default retention —
				// presumably the slice covering the window's end; confirm
				// loadActiveRetentionSlices returns slices in time order.
				dimensionRetentionDays: strconv.Itoa(slices[len(slices)-1].DefaultDays),
			},
		})
	}
	return readings, nil
}
// buildMeterRetentionQuery builds the ClickHouse aggregation query for one
// retention slice: per-dimension label extraction, the computed retention
// days and matched rule index, and the summed sample value. It returns the
// query, its bound arguments, and the dimension columns in SELECT order so
// callers can line up their scan destinations.
func buildMeterRetentionQuery(meterName string, slice retentionSlice) (string, []any, []retentionDimensionColumn, error) {
	retentionExpr, err := buildRetentionMultiIfSQL(slice.Rules, slice.DefaultDays)
	if err != nil {
		return "", nil, nil, err
	}
	retentionRuleIndexExpr, err := buildRetentionRuleIndexSQL(slice.Rules)
	if err != nil {
		return "", nil, nil, err
	}
	dimensionColumns, err := retentionDimensionColumns(slice.Rules)
	if err != nil {
		return "", nil, nil, err
	}
	selects := make([]string, 0, len(dimensionColumns)+3)
	groupBy := make([]string, 0, len(dimensionColumns)+2)
	// NOTE(review): column.key is interpolated into the SQL text rather than
	// bound as an argument. Keys come from stored retention rules — confirm
	// they are validated upstream so they cannot escape the string literal.
	for _, column := range dimensionColumns {
		selects = append(selects, fmt.Sprintf("JSONExtractString(labels, '%s') AS %s", column.key, column.alias))
		groupBy = append(groupBy, column.alias)
	}
	selects = append(selects,
		retentionExpr+" AS retention_days",
		retentionRuleIndexExpr+" AS retention_rule_index",
		"ifNull(sum(value), 0) AS value",
	)
	groupBy = append(groupBy, "retention_days", "retention_rule_index")
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select(selects...)
	sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
	// Half-open [StartMs, EndMs) bounds keep adjacent slices from
	// double-counting samples on the boundary millisecond.
	sb.Where(
		sb.Equal("metric_name", meterName),
		sb.GTE("unix_milli", slice.StartMs),
		sb.LT("unix_milli", slice.EndMs),
	)
	sb.GroupBy(groupBy...)
	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return query, args, dimensionColumns, nil
}
// retentionDimensionColumns lists the label keys to extract for the given
// rules — the workspace key first, then every rule-derived key except the
// workspace key — each assigned a positional dim_N alias.
func retentionDimensionColumns(rules []retentiontypes.CustomRetentionRule) ([]retentionDimensionColumn, error) {
	dimensionKeys, err := retentionRuleDimensionKeys(rules)
	if err != nil {
		return nil, err
	}
	ordered := make([]string, 0, len(dimensionKeys)+1)
	ordered = append(ordered, dimensionWorkspaceKeyID)
	for _, dimensionKey := range dimensionKeys {
		if dimensionKey != dimensionWorkspaceKeyID {
			ordered = append(ordered, dimensionKey)
		}
	}
	columns := make([]retentionDimensionColumn, 0, len(ordered))
	for position, dimensionKey := range ordered {
		columns = append(columns, retentionDimensionColumn{
			key:   dimensionKey,
			alias: fmt.Sprintf("dim_%d", position),
		})
	}
	return columns, nil
}
// retentionReadingDimensions assembles the dimension map for one scanned row:
// the org id and retention days always, the workspace key when present, and —
// when the row matched a custom rule — that rule's filter keys/values.
// A negative retentionRuleIndex means "no custom rule matched".
func retentionReadingDimensions(
	orgID valuer.UUID,
	retentionDays int,
	retentionRuleIndex int,
	dimensionColumns []retentionDimensionColumn,
	dimensionValues []string,
	rules []retentiontypes.CustomRetentionRule,
) (map[string]string, error) {
	if len(dimensionColumns) != len(dimensionValues) {
		return nil, errors.Newf(errors.TypeInternal, errCodeReportFailed, "dimension column/value count mismatch: %d columns, %d values", len(dimensionColumns), len(dimensionValues))
	}
	byKey := make(map[string]string, len(dimensionColumns))
	for i := range dimensionColumns {
		byKey[dimensionColumns[i].key] = dimensionValues[i]
	}
	result := map[string]string{
		dimensionOrganizationID: orgID.StringValue(),
		dimensionRetentionDays:  strconv.Itoa(retentionDays),
	}
	addNonEmptyDimension(result, dimensionWorkspaceKeyID, byKey[dimensionWorkspaceKeyID])
	switch {
	case retentionRuleIndex < 0:
		return result, nil
	case retentionRuleIndex >= len(rules):
		return nil, errors.Newf(errors.TypeInternal, errCodeReportFailed, "retention rule index %d out of range for %d rules", retentionRuleIndex, len(rules))
	}
	for _, ruleFilter := range rules[retentionRuleIndex].Filters {
		addNonEmptyDimension(result, ruleFilter.Key, byKey[ruleFilter.Key])
	}
	return result, nil
}
// addNonEmptyDimension records key=value in dimensions, skipping empty values
// so absent labels never appear as empty-string dimensions.
func addNonEmptyDimension(dimensions map[string]string, key, value string) {
	if value != "" {
		dimensions[key] = value
	}
}
// retentionReadingBucketKey encodes a dimension map into a canonical string
// usable as a map key. Keys are emitted in sorted order and both keys and
// values are length-prefixed, so the encoding is unambiguous even when keys
// or values contain the separator characters.
func retentionReadingBucketKey(dimensions map[string]string) string {
	sortedKeys := make([]string, 0, len(dimensions))
	for dimensionKey := range dimensions {
		sortedKeys = append(sortedKeys, dimensionKey)
	}
	sort.Strings(sortedKeys)
	var encoded strings.Builder
	for _, dimensionKey := range sortedKeys {
		dimensionValue := dimensions[dimensionKey]
		encoded.WriteString(strconv.Itoa(len(dimensionKey)))
		encoded.WriteString(":")
		encoded.WriteString(dimensionKey)
		encoded.WriteString("=")
		encoded.WriteString(strconv.Itoa(len(dimensionValue)))
		encoded.WriteString(":")
		encoded.WriteString(dimensionValue)
		encoded.WriteString(";")
	}
	return encoded.String()
}

View File

@@ -0,0 +1,43 @@
package signozmeterreporter
import (
"context"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/meterreportertypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// errCodeReportFailed tags every error surfaced by this package's collectors
// and reporter.
var errCodeReportFailed = errors.MustNewCode("meterreporter_report_failed")

// Window is the [Start, End) range a tick reports against.
// IsCompleted=true for sealed past days; false for the open today window.
type Window struct {
	StartUnixMilli int64
	EndUnixMilli   int64
	IsCompleted    bool
}

// CollectorDeps bundles the stores a collector needs: ClickHouse (via the
// telemetry store) for meter samples and the SQL store for retention config.
type CollectorDeps struct {
	TelemetryStore telemetrystore.TelemetryStore
	SQLStore       sqlstore.SQLStore
}

// CollectorFunc produces the readings for one meter, org, and window.
type CollectorFunc func(ctx context.Context, deps CollectorDeps, meter Meter, orgID valuer.UUID, window Window) ([]meterreportertypes.Reading, error)

// Dimension keys attached to readings shipped to Zeus.
const (
	dimensionOrganizationID = "signoz.billing.organization.id"
	dimensionRetentionDays  = "signoz.billing.retention.days"
	dimensionWorkspaceKeyID = "signoz.workspace.key.id"
)

// Meter is one registered billing meter. Name must be unique — Zeus
// checkpoints and upserts by it.
type Meter struct {
	Name        meterreportertypes.Name
	Unit        string
	Aggregation string
	Collect     CollectorFunc
}

View File

@@ -0,0 +1,647 @@
package signozmeterreporter
import (
"context"
"fmt"
"log/slog"
"sync"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrymeter"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/meterreportertypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/huandu/go-sqlbuilder"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
var _ factory.ServiceWithHealthy = (*Provider)(nil)
const (
phaseSealed = "sealed"
phaseToday = "today"
attrPhase = "phase"
attrResult = "result"
attrMeterReporterProvider = "meterreporter.provider"
attrOrgID = "meterreporter.org_id"
attrOrgCount = "meterreporter.org_count"
attrMeter = "meterreporter.meter"
attrDate = "meterreporter.date"
attrReadings = "meterreporter.readings"
attrReadingsCollected = "meterreporter.readings_collected"
attrReadingsDropped = "meterreporter.readings_dropped"
attrWindowStartUnixMilli = "meterreporter.window_start_unix_milli"
attrWindowEndUnixMilli = "meterreporter.window_end_unix_milli"
attrWindowCompleted = "meterreporter.window_completed"
attrCatchupStart = "meterreporter.catchup_start"
attrCatchupEnd = "meterreporter.catchup_end"
attrDurationMs = "meterreporter.duration_ms"
attrDryRun = "meterreporter.dry_run"
attrIdempotencyKey = "meterreporter.idempotency_key"
resultSuccess = "success"
resultFailure = "failure"
providerName = "signoz"
)
// Provider is the enterprise meter reporter. It ticks on a fixed interval,
// invokes every registered Collector against the instance's licensed org, and
// ships the resulting readings to Zeus. Community builds wire a noop provider
// instead, so this type never runs there.
type Provider struct {
	settings  factory.ScopedProviderSettings
	config    meterreporter.Config
	meters    []Meter // billing meters collected on every tick
	deps      CollectorDeps
	licensing licensing.Licensing
	orgGetter organization.Getter
	zeus      zeus.Zeus
	// healthyC is closed at the top of Start; see Healthy and Stop.
	healthyC chan struct{}
	// stopC is closed by Stop to end the tick loop.
	stopC        chan struct{}
	goroutinesWg sync.WaitGroup
	metrics      *reporterMetrics
}
// NewFactory wires the signoz meter reporter into the provider registry. The
// returned factory is registered alongside the noop factory so the "provider"
// config field picks the right implementation at startup. All dependencies
// are captured here and handed to newProvider when the factory is invoked.
func NewFactory(
	licensing licensing.Licensing,
	telemetryStore telemetrystore.TelemetryStore,
	sqlstore sqlstore.SQLStore,
	orgGetter organization.Getter,
	zeus zeus.Zeus,
) factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
	return factory.NewProviderFactory(
		factory.MustNewName("signoz"),
		func(ctx context.Context, providerSettings factory.ProviderSettings, config meterreporter.Config) (meterreporter.Reporter, error) {
			return newProvider(ctx, providerSettings, config, licensing, telemetryStore, sqlstore, orgGetter, zeus)
		},
	)
}
// newProvider constructs the enterprise reporter: scoped settings, its
// OpenTelemetry metrics, the default meter set, and the stores collectors
// will query. The context parameter is unused because construction performs
// no I/O.
func newProvider(
	_ context.Context,
	providerSettings factory.ProviderSettings,
	config meterreporter.Config,
	licensing licensing.Licensing,
	telemetryStore telemetrystore.TelemetryStore,
	sqlstore sqlstore.SQLStore,
	orgGetter organization.Getter,
	zeus zeus.Zeus,
) (*Provider, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/meterreporter/signozmeterreporter")
	metrics, err := newReporterMetrics(settings.Meter())
	if err != nil {
		return nil, err
	}
	meters, err := DefaultMeters()
	if err != nil {
		return nil, err
	}
	return &Provider{
		settings: settings,
		config:   config,
		meters:   meters,
		deps: CollectorDeps{
			TelemetryStore: telemetryStore,
			SQLStore:       sqlstore,
		},
		licensing: licensing,
		orgGetter: orgGetter,
		zeus:      zeus,
		healthyC:  make(chan struct{}),
		stopC:     make(chan struct{}),
		metrics:   metrics,
	}, nil
}
// Start runs an initial tick, then loops on Config.Interval until Stop is
// called. It blocks until the loop goroutine returns — that shape matches the
// factory.Service contract the rest of the codebase uses, so the supervisor
// can join on it the same way as other long-running services.
//
// NOTE(review): Start must be called at most once — a second call would close
// healthyC again and panic. Confirm the factory/supervisor guarantees this.
func (provider *Provider) Start(ctx context.Context) error {
	// Signal readiness immediately; Healthy and Stop both wait on this.
	close(provider.healthyC)
	provider.settings.Logger().InfoContext(ctx, "meter reporter started",
		slog.Duration("interval", provider.config.Interval),
		slog.Duration("timeout", provider.config.Timeout),
		slog.Int("catchup_max_days_per_tick", provider.config.CatchupMaxDaysPerTick),
		slog.Int("meters", len(provider.meters)),
	)
	provider.goroutinesWg.Add(1)
	go func() {
		defer provider.goroutinesWg.Done()
		// First tick fires right away so a restart doesn't wait a full
		// interval before reporting.
		provider.runTick(ctx)
		ticker := time.NewTicker(provider.config.Interval)
		defer ticker.Stop()
		for {
			select {
			case <-provider.stopC:
				return
			case <-ticker.C:
				provider.runTick(ctx)
			}
		}
	}()
	// Block until the loop goroutine exits (i.e. until Stop closes stopC).
	provider.goroutinesWg.Wait()
	return nil
}
// Stop signals the tick loop and waits for any in-flight tick to finish.
// Drain time is bounded by Config.Timeout because every tick runs under that
// deadline, so shutdown can't stall on a hung ClickHouse or Zeus call.
func (provider *Provider) Stop(ctx context.Context) error {
	// Wait for Start to have run; healthyC is closed at the top of Start.
	<-provider.healthyC
	provider.settings.Logger().InfoContext(ctx, "meter reporter stopping")
	// NOTE(review): this check-then-close is not safe under concurrent Stop
	// calls (both could observe the open channel, then both close and panic).
	// Confirm the supervisor serializes Stop.
	select {
	case <-provider.stopC:
		// already closed
	default:
		close(provider.stopC)
	}
	provider.goroutinesWg.Wait()
	provider.settings.Logger().InfoContext(ctx, "meter reporter stopped")
	return nil
}
// Healthy returns a channel that is closed once Start has begun, which
// callers can use as a readiness signal.
func (provider *Provider) Healthy() <-chan struct{} {
	return provider.healthyC
}
// runTick executes one collect-and-ship cycle under Config.Timeout. Errors
// from tick are logged and counted only — they never propagate, because the
// reporter must keep firing on subsequent intervals even if one batch fails.
func (provider *Provider) runTick(parentCtx context.Context) {
	tickStart := time.Now()
	ctx, span := provider.settings.Tracer().Start(parentCtx, "meterreporter.Tick", trace.WithAttributes(
		attribute.String(attrMeterReporterProvider, providerName),
		attribute.Int("meterreporter.meters", len(provider.meters)),
		attribute.Int("meterreporter.catchup_max_days_per_tick", provider.config.CatchupMaxDaysPerTick),
	))
	defer span.End()
	provider.metrics.ticks.Add(ctx, 1)
	// The per-tick deadline bounds every downstream call; Stop's drain-time
	// guarantee relies on it.
	ctx, cancel := context.WithTimeout(ctx, provider.config.Timeout)
	defer cancel()
	provider.settings.Logger().DebugContext(ctx, "meter reporter tick started",
		slog.Duration("timeout", provider.config.Timeout),
		slog.Int("meters", len(provider.meters)),
	)
	if err := provider.tick(ctx); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		span.SetAttributes(
			attribute.String(attrResult, resultFailure),
			attribute.Int64(attrDurationMs, time.Since(tickStart).Milliseconds()),
		)
		// Logged and counted only — the error is deliberately not returned.
		provider.settings.Logger().ErrorContext(ctx, "meter reporter tick failed",
			errors.Attr(err),
			slog.Duration("timeout", provider.config.Timeout),
			slog.Duration("duration", time.Since(tickStart)),
		)
		return
	}
	span.SetAttributes(
		attribute.String(attrResult, resultSuccess),
		attribute.Int64(attrDurationMs, time.Since(tickStart).Milliseconds()),
	)
	provider.settings.Logger().DebugContext(ctx, "meter reporter tick completed", slog.Duration("duration", time.Since(tickStart)))
}
// tick runs one collect-and-ship cycle for the instance's single active org.
// Two concerns:
//
// (A) sealed catchup — forward-fills is_completed=true days from the Zeus
// checkpoint up to yesterday, capped by CatchupMaxDaysPerTick. Stops at
// the first ship failure; next tick retries from the same point.
// (B) today partial — re-emits [00:00 UTC, now) every tick as
// is_completed=false. The day-scoped X-Idempotency-Key makes
// successive writes upsert.
//
// Per-meter collect failures and ship failures are logged and counted; they
// never abort the tick.
func (provider *Provider) tick(ctx context.Context) error {
	now := time.Now().UTC()
	// One snapshot drives every window boundary so a tick can't straddle midnight.
	todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)
	yesterday := todayStart.AddDate(0, 0, -1)
	orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
	if err != nil {
		return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "failed to list organizations")
	}
	trace.SpanFromContext(ctx).SetAttributes(attribute.Int(attrOrgCount, len(orgs)))
	if len(orgs) == 0 {
		provider.settings.Logger().InfoContext(ctx, "skipping meter reporter tick; no organizations found")
		return nil
	}
	org := orgs[0]
	if len(orgs) > 1 {
		// signoz_meter samples carry no org marker, so we can't disambiguate;
		// fall back to the first org and warn so the misconfig is visible.
		provider.settings.Logger().WarnContext(ctx, "multiple orgs on a single instance; reporting only the first",
			slog.Int("org_count", len(orgs)),
			slog.String("selected_org_id", org.ID.StringValue()),
		)
	}
	trace.SpanFromContext(ctx).SetAttributes(attribute.String(attrOrgID, org.ID.StringValue()))
	// Readings are shipped against the license key; without one there is
	// nowhere to ship, so skip quietly rather than error.
	license, err := provider.licensing.GetActive(ctx, org.ID)
	if err != nil {
		return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "failed to fetch active license for org %q", org.ID.StringValue())
	}
	if license == nil || license.Key == "" {
		provider.settings.Logger().WarnContext(ctx, "skipping tick, nil/empty license for org", slog.String("org_id", org.ID.StringValue()))
		return nil
	}
	// TODO: re-enable once /v2/meters/checkpoints is live in staging. Until
	// then we run with an empty checkpoint map; bootstrap floors are taken
	// from data and dropCheckpointed becomes a no-op for the sealed window.
	// checkpoints, err := provider.zeus.GetMeterCheckpoints(ctx, license.Key)
	// if err != nil {
	// 	provider.metrics.checkpointErrors.Add(ctx, 1)
	// 	provider.settings.Logger().ErrorContext(ctx, "skipping tick: meter checkpoints call failed", errors.Attr(err))
	// 	return nil
	// }
	// checkpointsByMeter := make(map[string]time.Time, len(checkpoints))
	// for _, checkpoint := range checkpoints {
	// 	checkpointsByMeter[checkpoint.Name] = checkpoint.Checkpoint.UTC()
	// }
	checkpointsByMeter := make(map[string]time.Time)
	// Concern A — sealed-range processor. catchupStart() already clamps to
	// yesterday, so we can step straight into the loop.
	floor := provider.dataFloor(ctx, todayStart)
	catchupStart := provider.catchupStart(floor, todayStart, checkpointsByMeter)
	end := catchupStart.AddDate(0, 0, provider.config.CatchupMaxDaysPerTick-1)
	if end.After(yesterday) {
		end = yesterday
	}
	trace.SpanFromContext(ctx).SetAttributes(
		attribute.String(attrCatchupStart, catchupStart.Format("2006-01-02")),
		attribute.String(attrCatchupEnd, end.Format("2006-01-02")),
	)
	provider.settings.Logger().DebugContext(ctx, "meter reporter catchup window selected",
		slog.String("org_id", org.ID.StringValue()),
		slog.Time("data_floor", floor),
		slog.Time("catchup_start", catchupStart),
		slog.Time("catchup_end", end),
		slog.Int("catchup_max_days_per_tick", provider.config.CatchupMaxDaysPerTick),
	)
	for day := catchupStart; !day.After(end); day = day.AddDate(0, 0, 1) {
		// Each sealed day covers the full [00:00, 24:00) UTC range.
		window := Window{
			StartUnixMilli: day.UnixMilli(),
			EndUnixMilli:   day.AddDate(0, 0, 1).UnixMilli(),
			IsCompleted:    true,
		}
		err := provider.runPhase(ctx, org.ID, license.Key, window, checkpointsByMeter)
		result := resultSuccess
		if err != nil {
			result = resultFailure
		}
		provider.metrics.catchupDaysProcessed.Add(ctx, 1, metric.WithAttributes(attribute.String(attrResult, result)))
		if err != nil {
			provider.settings.Logger().WarnContext(ctx, "stopping sealed catchup after failed day",
				errors.Attr(err),
				slog.String("date", day.Format("2006-01-02")),
			)
			break
		}
	}
	// Concern B — today partial. Runs every tick; concern A failures don't block it.
	todayWindow := Window{
		StartUnixMilli: todayStart.UnixMilli(),
		EndUnixMilli:   now.UnixMilli(),
		IsCompleted:    false,
	}
	// Error deliberately dropped: runPhase already logs/counts ship failures,
	// and the day-scoped idempotency key lets the next tick retry.
	_ = provider.runPhase(ctx, org.ID, license.Key, todayWindow, checkpointsByMeter)
	return nil
}
// runPhase collects every meter for one window and ships the resulting batch.
// Returns err only on ship failure — the sealed loop breaks on first failure.
// Per-meter collect failures are logged and counted but never bubble. For
// sealed windows, readings whose day is at-or-before the per-meter checkpoint
// are dropped to save bandwidth.
func (provider *Provider) runPhase(ctx context.Context, orgID valuer.UUID, licenseKey string, window Window, checkpointsByMeter map[string]time.Time) error {
	phaseLabel := phaseToday
	if window.IsCompleted {
		phaseLabel = phaseSealed
	}
	phaseAttr := metric.WithAttributes(attribute.String(attrPhase, phaseLabel))
	date := time.UnixMilli(window.StartUnixMilli).UTC().Format("2006-01-02")
	phaseStart := time.Now()
	ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.RunPhase", trace.WithAttributes(
		attribute.String(attrPhase, phaseLabel),
		attribute.String(attrOrgID, orgID.StringValue()),
		attribute.String(attrDate, date),
		attribute.Int64(attrWindowStartUnixMilli, window.StartUnixMilli),
		attribute.Int64(attrWindowEndUnixMilli, window.EndUnixMilli),
		attribute.Bool(attrWindowCompleted, window.IsCompleted),
	))
	defer span.End()
	provider.settings.Logger().DebugContext(ctx, "meter reporter phase started",
		slog.String("org_id", orgID.StringValue()),
		slog.String("phase", phaseLabel),
		slog.String("date", date),
		slog.Int64("start_unix_milli", window.StartUnixMilli),
		slog.Int64("end_unix_milli", window.EndUnixMilli),
		slog.Int("meters", len(provider.meters)),
	)
	// collectStart times the whole collect phase; meterCollectStart (inside
	// the loop) times one meter. Distinct names keep the per-meter timer from
	// shadowing the phase-wide timer.
	collectStart := time.Now()
	readings := make([]meterreportertypes.Reading, 0, len(provider.meters))
	for _, meter := range provider.meters {
		meterCollectStart := time.Now()
		collectCtx, collectSpan := provider.settings.Tracer().Start(ctx, "meterreporter.CollectMeter", trace.WithAttributes(
			attribute.String(attrPhase, phaseLabel),
			attribute.String(attrOrgID, orgID.StringValue()),
			attribute.String(attrMeter, meter.Name.String()),
			attribute.String(attrDate, date),
			attribute.Int64(attrWindowStartUnixMilli, window.StartUnixMilli),
			attribute.Int64(attrWindowEndUnixMilli, window.EndUnixMilli),
			attribute.Bool(attrWindowCompleted, window.IsCompleted),
		))
		collectedReadings, err := meter.Collect(collectCtx, provider.deps, meter, orgID, window)
		if err != nil {
			collectSpan.RecordError(err)
			collectSpan.SetStatus(codes.Error, err.Error())
			collectSpan.SetAttributes(
				attribute.String(attrResult, resultFailure),
				attribute.Int64(attrDurationMs, time.Since(meterCollectStart).Milliseconds()),
			)
			collectSpan.End()
			provider.metrics.collectErrors.Add(ctx, 1, phaseAttr)
			// A failed meter is skipped, not fatal — remaining meters still collect.
			provider.settings.Logger().WarnContext(ctx, "meter collection failed",
				errors.Attr(err),
				slog.String("meter", meter.Name.String()),
				slog.String("org_id", orgID.StringValue()),
				slog.String("phase", phaseLabel),
				slog.String("date", date),
				slog.Duration("duration", time.Since(meterCollectStart)),
			)
			continue
		}
		collectSpan.SetAttributes(
			attribute.String(attrResult, resultSuccess),
			attribute.Int(attrReadings, len(collectedReadings)),
			attribute.Int64(attrDurationMs, time.Since(meterCollectStart).Milliseconds()),
		)
		collectSpan.End()
		provider.settings.Logger().DebugContext(ctx, "meter collection completed",
			slog.String("meter", meter.Name.String()),
			slog.String("org_id", orgID.StringValue()),
			slog.String("phase", phaseLabel),
			slog.String("date", date),
			slog.Int("readings", len(collectedReadings)),
			slog.Duration("duration", time.Since(meterCollectStart)),
		)
		readings = append(readings, collectedReadings...)
	}
	collectDuration := time.Since(collectStart)
	provider.metrics.collectDuration.Record(ctx, collectDuration.Seconds(), phaseAttr)
	span.SetAttributes(attribute.Int(attrReadingsCollected, len(readings)))
	if window.IsCompleted {
		// Sealed windows only: drop readings Zeus has already checkpointed.
		beforeDrop := len(readings)
		readings = dropCheckpointed(readings, time.UnixMilli(window.StartUnixMilli).UTC(), checkpointsByMeter)
		dropped := beforeDrop - len(readings)
		span.SetAttributes(attribute.Int(attrReadingsDropped, dropped))
		if dropped > 0 {
			provider.settings.Logger().DebugContext(ctx, "dropped checkpointed meter readings",
				slog.String("org_id", orgID.StringValue()),
				slog.String("phase", phaseLabel),
				slog.String("date", date),
				slog.Int("dropped", dropped),
				slog.Int("remaining", len(readings)),
			)
		}
	}
	if len(readings) == 0 {
		// Nothing to ship — still a successful phase.
		span.SetAttributes(
			attribute.String(attrResult, resultSuccess),
			attribute.Int(attrReadings, 0),
			attribute.Int64(attrDurationMs, time.Since(phaseStart).Milliseconds()),
		)
		provider.settings.Logger().DebugContext(ctx, "meter reporter phase produced no readings",
			slog.String("org_id", orgID.StringValue()),
			slog.String("phase", phaseLabel),
			slog.String("date", date),
			slog.Duration("collect_duration", collectDuration),
			slog.Duration("duration", time.Since(phaseStart)),
		)
		return nil
	}
	shipStart := time.Now()
	err := provider.shipReadings(ctx, licenseKey, date, readings)
	shipDuration := time.Since(shipStart)
	provider.metrics.shipDuration.Record(ctx, shipDuration.Seconds(), phaseAttr)
	if err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		span.SetAttributes(attribute.String(attrResult, resultFailure))
		provider.metrics.postErrors.Add(ctx, 1, phaseAttr)
		provider.settings.Logger().ErrorContext(ctx, "failed to ship meter readings",
			errors.Attr(err),
			slog.String("phase", phaseLabel),
			slog.String("date", date),
			slog.Int("readings", len(readings)),
			slog.Duration("ship_duration", shipDuration),
		)
		return err
	}
	provider.metrics.readingsEmitted.Add(ctx, int64(len(readings)), phaseAttr)
	span.SetAttributes(
		attribute.String(attrResult, resultSuccess),
		attribute.Int(attrReadings, len(readings)),
		attribute.Int64(attrDurationMs, time.Since(phaseStart).Milliseconds()),
	)
	provider.settings.Logger().InfoContext(ctx, "meter reporter phase shipped",
		slog.String("org_id", orgID.StringValue()),
		slog.String("phase", phaseLabel),
		slog.String("date", date),
		slog.Int("readings", len(readings)),
		slog.Duration("collect_duration", collectDuration),
		slog.Duration("ship_duration", shipDuration),
		slog.Duration("duration", time.Since(phaseStart)),
	)
	return nil
}
// dropCheckpointed filters out readings the per-meter checkpoint says were
// already shipped. A reading is kept when its meter has no checkpoint, or
// when the checkpoint lies strictly before windowDay.
func dropCheckpointed(readings []meterreportertypes.Reading, windowDay time.Time, checkpointsByMeter map[string]time.Time) []meterreportertypes.Reading {
	if len(checkpointsByMeter) == 0 {
		return readings
	}
	// In-place filter: reuse the backing array of readings.
	kept := readings[:0]
	for _, reading := range readings {
		if checkpoint, ok := checkpointsByMeter[reading.MeterName]; ok && !checkpoint.Before(windowDay) {
			// Checkpoint at or after this day — already shipped, skip it.
			continue
		}
		kept = append(kept, reading)
	}
	return kept
}
// catchupStart picks the earliest UTC day this tick should re-process.
// Meters with no checkpoint bootstrap from floor; older checkpoints are
// clamped up to floor. The final yesterday-clamp guarantees yesterday is
// always retried within Zeus's 24h mutable window, so a partial-failure tick
// can't leave a missing (workspace, retention) bucket hidden behind the
// per-meter MAX(start_date) checkpoint.
func (provider *Provider) catchupStart(floor time.Time, todayStart time.Time, checkpointsByMeter map[string]time.Time) time.Time {
	earliest := todayStart
	for _, meter := range provider.meters {
		// Default: bootstrap from the data floor.
		candidate := floor
		if checkpoint, ok := checkpointsByMeter[meter.Name.String()]; ok {
			// Resume the day after the checkpoint, never before the floor.
			candidate = checkpoint.AddDate(0, 0, 1)
			if candidate.Before(floor) {
				candidate = floor
			}
		}
		if candidate.Before(earliest) {
			earliest = candidate
		}
	}
	if yesterday := todayStart.AddDate(0, 0, -1); earliest.After(yesterday) {
		return yesterday
	}
	return earliest
}
// dataFloor returns the earliest day signoz_meter.distributed_samples holds a
// sample, truncated to UTC midnight. With no data — or on query failure —
// returns todayStart, which the yesterday-clamp in catchupStart turns into a
// single sealed-day pass.
//
// Unfiltered by metric_name on purpose: the meter table is billing-only by
// design, so the global min spans logs/metrics/traces. Filtering would let
// earlier metric or trace data slip past the floor and under-bill on backfill.
// The CH meter-table TTL caps how old the data can ever be.
func (provider *Provider) dataFloor(ctx context.Context, todayStart time.Time) time.Time {
	ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.DataFloor")
	defer span.End()
	// No telemetry store wired in — behave as if there were no data.
	if provider.deps.TelemetryStore == nil {
		span.SetAttributes(attribute.String(attrResult, resultSuccess))
		return todayStart
	}
	// Global MIN(unix_milli); ifNull maps an empty table to 0 so Scan never
	// sees a NULL.
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("ifNull(min(unix_milli), 0)")
	sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	var minMs int64
	if err := provider.deps.TelemetryStore.ClickhouseDB().QueryRow(ctx, query, args...).Scan(&minMs); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		span.SetAttributes(attribute.String(attrResult, resultFailure))
		provider.settings.Logger().WarnContext(ctx, "failed to read data floor; falling back to latest sealed day", errors.Attr(err))
		return todayStart
	}
	// 0 is the ifNull sentinel for "no rows at all".
	if minMs == 0 {
		span.SetAttributes(
			attribute.String(attrResult, resultSuccess),
			attribute.Int64("meterreporter.data_floor_unix_milli", 0),
		)
		return todayStart
	}
	minDay := time.UnixMilli(minMs).UTC()
	floor := time.Date(minDay.Year(), minDay.Month(), minDay.Day(), 0, 0, 0, 0, time.UTC)
	span.SetAttributes(
		attribute.String(attrResult, resultSuccess),
		attribute.Int64("meterreporter.data_floor_unix_milli", floor.UnixMilli()),
	)
	provider.settings.Logger().DebugContext(ctx, "meter reporter data floor loaded", slog.Time("data_floor", floor))
	return floor
}
// shipReadings POSTs the day's batch to Zeus. The date-scoped idempotency key
// makes repeat ticks within the same UTC day UPSERT instead of duplicating.
// Zeus accepts or rejects the batch as a whole — partial acceptance is not
// supported, so a single error here means none of the readings were stored.
func (provider *Provider) shipReadings(ctx context.Context, licenseKey string, date string, readings []meterreportertypes.Reading) error {
	// One key per UTC day: repeat ships of the same date overwrite, not append.
	idempotencyKey := fmt.Sprintf("meter-cron:%s", date)
	ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.ShipReadings", trace.WithAttributes(
		attribute.String(attrDate, date),
		attribute.Int(attrReadings, len(readings)),
		attribute.String(attrIdempotencyKey, idempotencyKey),
		attribute.Bool(attrDryRun, true),
	))
	defer span.End()
	provider.settings.Logger().InfoContext(ctx, "meter readings prepared for shipment",
		slog.String("date", date),
		slog.Int("readings", len(readings)),
		slog.String("idempotency_key", idempotencyKey),
		slog.Bool("dry_run", true),
	)
	// Staging visibility while /v2/meters is offline. Drop or demote
	// to Debug once Zeus accepts the writes.
	for _, reading := range readings {
		provider.settings.Logger().InfoContext(ctx, "meter reading prepared for shipment",
			slog.String("meter", reading.MeterName),
			slog.Float64("value", reading.Value),
			slog.String("unit", reading.Unit),
			slog.String("aggregation", reading.Aggregation),
			slog.Int64("start_unix_milli", reading.StartUnixMilli),
			slog.Int64("end_unix_milli", reading.EndUnixMilli),
			slog.Bool("is_completed", reading.IsCompleted),
			slog.Any("dimensions", reading.Dimensions),
			slog.String("idempotency_key", idempotencyKey),
		)
	}
	// TODO: re-enable once /v2/meters is live in staging.
	// body, err := json.Marshal(meterreportertypes.PostableMeterReadings{Meters: readings})
	// if err != nil {
	// 	return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "marshal meter readings for %s", date)
	// }
	// if err := provider.zeus.PutMeterReadings(ctx, licenseKey, idempotencyKey, body); err != nil {
	// 	return errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "ship meter readings for %s", date)
	// }
	// licenseKey is unused until the PUT above is re-enabled.
	_ = licenseKey
	span.SetAttributes(attribute.String(attrResult, resultSuccess))
	return nil
}

View File

@@ -0,0 +1,111 @@
package signozmeterreporter
import (
"testing"
"time"
"github.com/SigNoz/signoz/pkg/types/meterreportertypes"
)
func TestCatchupStartBootstrapsMissingMeter(t *testing.T) {
	t.Parallel()
	var (
		today = time.Date(2026, 4, 27, 0, 0, 0, 0, time.UTC)
		floor = time.Date(2026, 4, 21, 0, 0, 0, 0, time.UTC)
	)
	provider := &Provider{
		meters: []Meter{
			{Name: meterreportertypes.MustNewName("meter.a")},
			{Name: meterreportertypes.MustNewName("meter.b")},
		},
	}
	// meter.b has no checkpoint, so the start must fall back to the floor.
	checkpoints := map[string]time.Time{"meter.a": today.AddDate(0, 0, -1)}
	start := provider.catchupStart(floor, today, checkpoints)
	if !start.Equal(floor) {
		t.Fatalf("catchupStart() = %s, want %s (bootstrap from floor for meter.b)", start, floor)
	}
}
func TestCatchupStartClampsOldCheckpointToFloor(t *testing.T) {
	t.Parallel()
	var (
		today = time.Date(2026, 4, 27, 0, 0, 0, 0, time.UTC)
		floor = time.Date(2026, 4, 21, 0, 0, 0, 0, time.UTC)
	)
	provider := &Provider{
		meters: []Meter{
			{Name: meterreportertypes.MustNewName("meter.a")},
		},
	}
	// A checkpoint far older than the floor must be clamped up to the floor.
	ancient := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	start := provider.catchupStart(floor, today, map[string]time.Time{"meter.a": ancient})
	if !start.Equal(floor) {
		t.Fatalf("catchupStart() = %s, want %s (clamped to floor)", start, floor)
	}
}
func TestCatchupStartClampsToYesterdayWhenAllCheckpointsAreYesterday(t *testing.T) {
	t.Parallel()
	today := time.Date(2026, 4, 28, 0, 0, 0, 0, time.UTC)
	yesterday := today.AddDate(0, 0, -1)
	floor := time.Date(2026, 4, 21, 0, 0, 0, 0, time.UTC)
	provider := &Provider{
		meters: []Meter{
			{Name: meterreportertypes.MustNewName("meter.a")},
			{Name: meterreportertypes.MustNewName("meter.b")},
		},
	}
	// Both meters checkpointed at yesterday would push the start to today;
	// the clamp must pull it back to yesterday.
	checkpoints := map[string]time.Time{
		"meter.a": yesterday,
		"meter.b": yesterday,
	}
	start := provider.catchupStart(floor, today, checkpoints)
	if !start.Equal(yesterday) {
		t.Fatalf("catchupStart() = %s, want %s (yesterday clamp)", start, yesterday)
	}
}
func TestDropCheckpointed(t *testing.T) {
	t.Parallel()
	day := time.Date(2026, 4, 24, 0, 0, 0, 0, time.UTC)
	// meter.a: checkpoint exactly at the day → dropped.
	// meter.b: checkpoint one day earlier → kept.
	// meter.c: no checkpoint at all → kept.
	checkpoints := map[string]time.Time{
		"meter.a": day,
		"meter.b": day.AddDate(0, 0, -1),
	}
	input := []meterreportertypes.Reading{
		{MeterName: "meter.a"},
		{MeterName: "meter.b"},
		{MeterName: "meter.c"},
	}
	kept := dropCheckpointed(input, day, checkpoints)
	if len(kept) != 2 {
		t.Fatalf("len(got) = %d, want 2", len(kept))
	}
	if kept[0].MeterName != "meter.b" || kept[1].MeterName != "meter.c" {
		t.Fatalf("got = %+v, want [meter.b, meter.c]", kept)
	}
}
func TestDropCheckpointedEmptyCheckpointsKeepsAll(t *testing.T) {
	t.Parallel()
	input := []meterreportertypes.Reading{
		{MeterName: "meter.a"},
		{MeterName: "meter.b"},
	}
	// An empty checkpoint map must be a no-op filter.
	kept := dropCheckpointed(input, time.Now(), map[string]time.Time{})
	if len(kept) != len(input) {
		t.Fatalf("len(got) = %d, want %d", len(kept), len(input))
	}
}

View File

@@ -0,0 +1,115 @@
package signozmeterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/types/meterreportertypes"
)
// Refer to these symbols (not string literals) so typos become compile errors
// instead of silently spawning unbilled meter rows at Zeus.
var (
	MeterLogCount             = meterreportertypes.MustNewName("signoz.meter.log.count")
	MeterLogSize              = meterreportertypes.MustNewName("signoz.meter.log.size")
	MeterMetricDatapointCount = meterreportertypes.MustNewName("signoz.meter.metric.datapoint.count")
	MeterMetricDatapointSize  = meterreportertypes.MustNewName("signoz.meter.metric.datapoint.size")
	MeterSpanCount            = meterreportertypes.MustNewName("signoz.meter.span.count")
	MeterSpanSize             = meterreportertypes.MustNewName("signoz.meter.span.size")
)

// AggregationSum is the only aggregation the base registry emits.
const AggregationSum = "sum"
// baseMeters returns the hardcoded meter registry. Validation is deliberately
// left to DefaultMeters: it already runs validateMeters and returns the error,
// so panicking here as well was redundant and made DefaultMeters' error path
// unreachable (a panic in library code hides the recoverable error).
func baseMeters() []*Meter {
	return []*Meter{
		{
			Name:        MeterLogCount,
			Unit:        "count",
			Aggregation: AggregationSum,
			Collect:     CollectLogCountMeter,
		},
		{
			Name:        MeterLogSize,
			Unit:        "bytes",
			Aggregation: AggregationSum,
			Collect:     CollectLogSizeMeter,
		},
		{
			Name:        MeterMetricDatapointCount,
			Unit:        "count",
			Aggregation: AggregationSum,
			Collect:     CollectMetricDatapointCountMeter,
		},
		{
			Name:        MeterMetricDatapointSize,
			Unit:        "bytes",
			Aggregation: AggregationSum,
			Collect:     CollectMetricDatapointSizeMeter,
		},
		{
			Name:        MeterSpanCount,
			Unit:        "count",
			Aggregation: AggregationSum,
			Collect:     CollectSpanCountMeter,
		},
		{
			Name:        MeterSpanSize,
			Unit:        "bytes",
			Aggregation: AggregationSum,
			Collect:     CollectSpanSizeMeter,
		},
	}
}
// DefaultMeters validates the base registry and returns it by value so
// callers cannot mutate the shared definitions.
func DefaultMeters() ([]Meter, error) {
	registered := baseMeters()
	if err := validateMeters(registered...); err != nil {
		return nil, err
	}
	out := make([]Meter, 0, len(registered))
	for _, meter := range registered {
		out = append(out, *meter)
	}
	return out, nil
}
// validateMeters rejects nil entries, missing fields, and duplicate names so
// a malformed registration can never reach Zeus.
func validateMeters(meters ...*Meter) error {
	names := make(map[string]struct{}, len(meters))
	for _, candidate := range meters {
		if candidate == nil {
			return errors.New(errors.TypeInvalidInput, meterreporter.ErrCodeInvalidInput, "nil meter in registry")
		}
		switch {
		case candidate.Name.IsZero():
			return errors.New(errors.TypeInvalidInput, meterreporter.ErrCodeInvalidInput, "meter with empty name in registry")
		case candidate.Unit == "":
			return errors.Newf(errors.TypeInvalidInput, meterreporter.ErrCodeInvalidInput, "meter %q has no unit", candidate.Name.String())
		case candidate.Aggregation == "":
			return errors.Newf(errors.TypeInvalidInput, meterreporter.ErrCodeInvalidInput, "meter %q has no aggregation", candidate.Name.String())
		case candidate.Collect == nil:
			return errors.Newf(errors.TypeInvalidInput, meterreporter.ErrCodeInvalidInput, "meter %q has no collector function", candidate.Name.String())
		}
		name := candidate.Name.String()
		if _, duplicate := names[name]; duplicate {
			return errors.Newf(errors.TypeInvalidInput, meterreporter.ErrCodeInvalidInput, "duplicate meter %q", candidate.Name.String())
		}
		names[name] = struct{}{}
	}
	return nil
}
// mustValidateMeters panics when any meter fails validateMeters.
// Used for hardcoded registrations: a panic is a programmer error.
func mustValidateMeters(meters ...*Meter) {
	if err := validateMeters(meters...); err != nil {
		panic(err)
	}
}

View File

@@ -0,0 +1,307 @@
package signozmeterreporter
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/telemetrytraces"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// RetentionDomain names one telemetry signal whose retention config is
// tracked in ttl_setting rows.
type RetentionDomain string

const (
	RetentionDomainLogs    RetentionDomain = "logs"
	RetentionDomainMetrics RetentionDomain = "metrics"
	RetentionDomainTraces  RetentionDomain = "traces"
)

// secondsPerDay converts V1 ttl_setting rows (TTL in seconds) to whole days.
const secondsPerDay = 24 * 60 * 60

// retentionDomainConfig pairs a domain's ClickHouse table with the fallback
// retention used when no ttl_setting row exists.
type retentionDomainConfig struct {
	tableName   string
	defaultDays int
}

var (
	retentionDomainConfigs = map[RetentionDomain]retentionDomainConfig{
		RetentionDomainLogs: {
			tableName:   telemetrylogs.DBName + "." + telemetrylogs.LogsV2LocalTableName,
			defaultDays: retentiontypes.DefaultLogsRetentionDays,
		},
		RetentionDomainMetrics: {
			tableName:   telemetrymetrics.DBName + "." + telemetrymetrics.SamplesV4LocalTableName,
			defaultDays: retentiontypes.DefaultMetricsRetentionDays,
		},
		RetentionDomainTraces: {
			tableName:   telemetrytraces.DBName + "." + telemetrytraces.SpanIndexV3LocalTableName,
			defaultDays: retentiontypes.DefaultTracesRetentionDays,
		},
	}
	// Inlined into SQL — strict allowlist guards against injection from a
	// malformed ttl_setting row.
	retentionLabelKeyPattern   = regexp.MustCompile(`^[A-Za-z0-9_.\-]+$`)
	retentionLabelValuePattern = regexp.MustCompile(`^[A-Za-z0-9_.\-:]+$`)
)

// retentionSlice is a half-open time range where one ttl_setting recipe applies.
type retentionSlice struct {
	StartMs     int64
	EndMs       int64
	Rules       []retentiontypes.CustomRetentionRule
	DefaultDays int
}
// retentionConfigFor looks up the static per-domain config, erroring on an
// unknown domain instead of silently using zero values.
func retentionConfigFor(domain RetentionDomain) (retentionDomainConfig, error) {
	if config, ok := retentionDomainConfigs[domain]; ok {
		return config, nil
	}
	return retentionDomainConfig{}, errors.Newf(errors.TypeInternal, errCodeReportFailed, "retention config unavailable for domain %q", domain)
}
// loadActiveRetentionSlices returns slices covering [startMs, endMs) in
// chronological order, one per ttl_setting recipe active in that span.
func loadActiveRetentionSlices(
	ctx context.Context,
	sqlstore sqlstore.SQLStore,
	orgID valuer.UUID,
	domain RetentionDomain,
	startMs, endMs int64,
) ([]retentionSlice, error) {
	// Empty or inverted window — nothing to cover.
	if startMs >= endMs {
		return nil, nil
	}
	if sqlstore == nil {
		return nil, errors.New(errors.TypeInternal, errCodeReportFailed, "sqlstore is nil")
	}
	config, err := retentionConfigFor(domain)
	if err != nil {
		return nil, err
	}
	// Fetch every successful row created before the window end: rows older
	// than startMs are still needed to seed the config active at the start.
	rows := []*types.TTLSetting{}
	err = sqlstore.
		BunDB().
		NewSelect().
		Model(&rows).
		Where("table_name = ?", config.tableName).
		Where("org_id = ?", orgID.StringValue()).
		Where("status = ?", types.TTLSettingStatusSuccess).
		Where("created_at < ?", time.UnixMilli(endMs).UTC()).
		OrderExpr("created_at ASC").
		Scan(ctx)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "load ttl_setting rows for org %q", orgID.StringValue())
	}
	return buildRetentionSlicesFromRows(domain, rows, startMs, endMs)
}
// buildRetentionSlicesFromRows partitions [startMs, endMs) into slices at each
// ttl_setting row's created_at, attaching the config that was active during
// each slice. Rows must be sorted by created_at ascending (the query's
// ORDER BY guarantees this for the production caller).
func buildRetentionSlicesFromRows(domain RetentionDomain, rows []*types.TTLSetting, startMs, endMs int64) ([]retentionSlice, error) {
	if startMs >= endMs {
		return nil, nil
	}
	// The latest row at or before startMs is the active config at the
	// window start; rows strictly inside become slice boundaries.
	var activeAtStart *types.TTLSetting
	inWindow := make([]*types.TTLSetting, 0, len(rows))
	for _, row := range rows {
		rowMs := row.CreatedAt.UnixMilli()
		if rowMs <= startMs {
			activeAtStart = row
			continue
		}
		if rowMs >= endMs {
			continue
		}
		inWindow = append(inWindow, row)
	}
	activeRules, activeDefault, err := configFromTTLSetting(domain, activeAtStart)
	if err != nil {
		return nil, err
	}
	slices := make([]retentionSlice, 0, len(inWindow)+1)
	// cursor tracks where the next slice begins.
	cursor := startMs
	for _, row := range inWindow {
		rowMs := row.CreatedAt.UnixMilli()
		if rowMs <= cursor {
			// Same-ms updates collapse: replace active config, no empty slice.
			activeRules, activeDefault, err = configFromTTLSetting(domain, row)
			if err != nil {
				return nil, err
			}
			continue
		}
		// Close the slice that ran under the previous config…
		slices = append(slices, retentionSlice{
			StartMs:     cursor,
			EndMs:       rowMs,
			Rules:       activeRules,
			DefaultDays: activeDefault,
		})
		// …and open the next one under this row's config.
		cursor = rowMs
		activeRules, activeDefault, err = configFromTTLSetting(domain, row)
		if err != nil {
			return nil, err
		}
	}
	// Tail slice: from the last boundary to the window end.
	if cursor < endMs {
		slices = append(slices, retentionSlice{
			StartMs:     cursor,
			EndMs:       endMs,
			Rules:       activeRules,
			DefaultDays: activeDefault,
		})
	}
	return slices, nil
}
// configFromTTLSetting unpacks one ttl_setting row.
// V1 (Condition=="") stores TTL in seconds; V2 stores TTL in days. A nil row
// yields the domain default with no custom rules.
func configFromTTLSetting(domain RetentionDomain, row *types.TTLSetting) ([]retentiontypes.CustomRetentionRule, int, error) {
	config, err := retentionConfigFor(domain)
	if err != nil {
		return nil, 0, err
	}
	if row == nil {
		return nil, config.defaultDays, nil
	}
	if row.Condition == "" {
		// V1 stores seconds — round up to whole days.
		days := (row.TTL + secondsPerDay - 1) / secondsPerDay
		if days <= 0 {
			days = config.defaultDays
		}
		return nil, days, nil
	}
	// V2: TTL is already in days; Condition carries the custom rules as JSON.
	days := row.TTL
	if days <= 0 {
		days = config.defaultDays
	}
	var rules []retentiontypes.CustomRetentionRule
	if err := json.Unmarshal([]byte(row.Condition), &rules); err != nil {
		return nil, 0, errors.Wrapf(err, errors.TypeInternal, errCodeReportFailed, "parse ttl_setting condition for row %q", row.ID.StringValue())
	}
	return rules, days, nil
}
// buildRetentionMultiIfSQL renders the retention-days SELECT expression for
// one slice — first matching rule wins. The toInt32 wrapper pins the column
// type so Scan(&int32) works regardless of arm widths (ClickHouse otherwise
// infers UInt8/UInt16 from the largest arm).
func buildRetentionMultiIfSQL(rules []retentiontypes.CustomRetentionRule, defaultDays int) (string, error) {
	if defaultDays <= 0 {
		return "", errors.Newf(errors.TypeInternal, errCodeReportFailed, "non-positive default retention %d", defaultDays)
	}
	if len(rules) == 0 {
		// No rules — a bare constant, no multiIf needed.
		return fmt.Sprintf("toInt32(%d)", defaultDays), nil
	}
	arms := make([]string, 0, 2*len(rules)+1)
	for ruleIndex, rule := range rules {
		if rule.TTLDays <= 0 {
			return "", errors.Newf(errors.TypeInternal, errCodeReportFailed, "rule %d has non-positive ttl_days %d", ruleIndex, rule.TTLDays)
		}
		conditionExpr, err := buildRetentionRuleConditionSQL(ruleIndex, rule)
		if err != nil {
			return "", err
		}
		arms = append(arms, conditionExpr, strconv.Itoa(rule.TTLDays))
	}
	arms = append(arms, strconv.Itoa(defaultDays))
	return fmt.Sprintf("toInt32(multiIf(%s))", strings.Join(arms, ", ")), nil
}
// buildRetentionRuleIndexSQL renders a multiIf that evaluates to the index of
// the first matching rule, or -1 when no rule matches (or none exist).
func buildRetentionRuleIndexSQL(rules []retentiontypes.CustomRetentionRule) (string, error) {
	if len(rules) == 0 {
		return "toInt32(-1)", nil
	}
	arms := make([]string, 0, 2*len(rules)+1)
	for ruleIndex, rule := range rules {
		conditionExpr, err := buildRetentionRuleConditionSQL(ruleIndex, rule)
		if err != nil {
			return "", err
		}
		arms = append(arms, conditionExpr, strconv.Itoa(ruleIndex))
	}
	arms = append(arms, "-1")
	return fmt.Sprintf("toInt32(multiIf(%s))", strings.Join(arms, ", ")), nil
}
// buildRetentionRuleConditionSQL renders one rule's filter set as a SQL
// predicate: all filters must match (joined with AND), and a filter matches
// when the label value is IN its allow-list. Keys and values are validated
// against strict patterns before being spliced into the expression, which is
// what makes the string concatenation below injection-safe.
func buildRetentionRuleConditionSQL(ruleIndex int, rule retentiontypes.CustomRetentionRule) (string, error) {
	if len(rule.Filters) == 0 {
		return "", errors.Newf(errors.TypeInternal, errCodeReportFailed, "rule %d has no filters", ruleIndex)
	}
	clauses := make([]string, 0, len(rule.Filters))
	for fi, f := range rule.Filters {
		if !retentionLabelKeyPattern.MatchString(f.Key) {
			return "", errors.Newf(errors.TypeInternal, errCodeReportFailed, "rule %d filter %d has invalid key %q", ruleIndex, fi, f.Key)
		}
		if len(f.Values) == 0 {
			return "", errors.Newf(errors.TypeInternal, errCodeReportFailed, "rule %d filter %d has no values", ruleIndex, fi)
		}
		vals := make([]string, len(f.Values))
		for vi, v := range f.Values {
			if !retentionLabelValuePattern.MatchString(v) {
				return "", errors.Newf(errors.TypeInternal, errCodeReportFailed, "rule %d filter %d value %d is invalid %q", ruleIndex, fi, vi, v)
			}
			vals[vi] = "'" + v + "'"
		}
		clauses = append(clauses, "JSONExtractString(labels, '"+f.Key+"') IN ("+strings.Join(vals, ", ")+")")
	}
	return strings.Join(clauses, " AND "), nil
}
// retentionRuleDimensionKeys returns the distinct filter keys referenced by
// the rules, in first-appearance order, validating each key against the
// label-key pattern before accepting it.
func retentionRuleDimensionKeys(rules []retentiontypes.CustomRetentionRule) ([]string, error) {
	seen := make(map[string]struct{})
	keys := make([]string, 0)
	for ri, rule := range rules {
		for fi, filter := range rule.Filters {
			if !retentionLabelKeyPattern.MatchString(filter.Key) {
				return nil, errors.Newf(errors.TypeInternal, errCodeReportFailed, "rule %d filter %d has invalid key %q", ri, fi, filter.Key)
			}
			if _, dup := seen[filter.Key]; dup {
				continue
			}
			seen[filter.Key] = struct{}{}
			keys = append(keys, filter.Key)
		}
	}
	return keys, nil
}

View File

@@ -0,0 +1,274 @@
package signozmeterreporter
import (
"reflect"
"strings"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// makeRow constructs a *types.TTLSetting with the fields the slice loader
// reads. Other fields are left zero.
func makeRow(createdAt time.Time, ttl int, condition string) *types.TTLSetting {
	return &types.TTLSetting{
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		// CreatedAt drives slice boundaries; UpdatedAt is set to match.
		TimeAuditable: types.TimeAuditable{
			CreatedAt: createdAt,
			UpdatedAt: createdAt,
		},
		// TTL is in seconds — callers pass days*secondsPerDay.
		TTL:       ttl,
		Status:    types.TTLSettingStatusSuccess,
		Condition: condition,
	}
}
// With no ttl_setting rows at all, the full window must come back as a
// single slice carrying the domain default and no custom rules.
func TestBuildRetentionSlicesFromRowsNoRowsFallsBackToDefault(t *testing.T) {
	t.Parallel()
	windowStart := time.Date(2026, 4, 28, 0, 0, 0, 0, time.UTC).UnixMilli()
	windowEnd := time.Date(2026, 4, 29, 0, 0, 0, 0, time.UTC).UnixMilli()
	slices, err := buildRetentionSlicesFromRows(RetentionDomainLogs, nil, windowStart, windowEnd)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(slices) != 1 {
		t.Fatalf("len(slices) = %d, want 1", len(slices))
	}
	only := slices[0]
	if only.StartMs != windowStart || only.EndMs != windowEnd {
		t.Fatalf("slice span = [%d, %d), want [%d, %d)", only.StartMs, only.EndMs, windowStart, windowEnd)
	}
	if only.DefaultDays != retentiontypes.DefaultLogsRetentionDays {
		t.Fatalf("DefaultDays = %d, want %d", only.DefaultDays, retentiontypes.DefaultLogsRetentionDays)
	}
	if len(only.Rules) != 0 {
		t.Fatalf("Rules = %v, want empty", only.Rules)
	}
}
// A config change landing inside the reporting window must split the window
// into two slices at the change's CreatedAt, each carrying its own config.
func TestBuildRetentionSlicesFromRowsMidWindowChangeProducesTwoSlices(t *testing.T) {
	t.Parallel()
	winStart := time.Date(2026, 4, 28, 0, 0, 0, 0, time.UTC).UnixMilli()
	winEnd := time.Date(2026, 4, 29, 0, 0, 0, 0, time.UTC).UnixMilli()
	changeAt := time.Date(2026, 4, 28, 12, 0, 0, 0, time.UTC)
	// One row predates the window (the active config at winStart); the other
	// lands mid-window and introduces a custom rule.
	before := makeRow(time.Date(2026, 4, 21, 15, 28, 3, 0, time.UTC), 30*secondsPerDay, "")
	during := makeRow(changeAt, retentiontypes.DefaultLogsRetentionDays, `[{"conditions":[{"key":"signoz.workspace.key.id","values":["a"]}],"ttlDays":90}]`)
	slices, err := buildRetentionSlicesFromRows(RetentionDomainLogs, []*types.TTLSetting{before, during}, winStart, winEnd)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(slices) != 2 {
		t.Fatalf("len(slices) = %d, want 2", len(slices))
	}
	first, second := slices[0], slices[1]
	if first.StartMs != winStart || first.EndMs != changeAt.UnixMilli() {
		t.Fatalf("first slice span = [%d, %d), want [%d, %d)", first.StartMs, first.EndMs, winStart, changeAt.UnixMilli())
	}
	if first.DefaultDays != 30 || len(first.Rules) != 0 {
		t.Fatalf("first slice config = (%d, %d rules), want (30, 0)", first.DefaultDays, len(first.Rules))
	}
	if second.StartMs != changeAt.UnixMilli() || second.EndMs != winEnd {
		t.Fatalf("second slice span = [%d, %d), want [%d, %d)", second.StartMs, second.EndMs, changeAt.UnixMilli(), winEnd)
	}
	if second.DefaultDays != retentiontypes.DefaultLogsRetentionDays || len(second.Rules) != 1 || second.Rules[0].TTLDays != 90 {
		t.Fatalf("second slice config = (%d, %#v), want (%d, one rule at 90)", second.DefaultDays, second.Rules, retentiontypes.DefaultLogsRetentionDays)
	}
}
// Boundary semantics for the [start, end) window: a row stamped exactly at
// start is the active config, rows stamped at or past end are ignored.
func TestBuildRetentionSlicesFromRowsBoundaryHandling(t *testing.T) {
	t.Parallel()
	winStart := time.Date(2026, 4, 28, 0, 0, 0, 0, time.UTC).UnixMilli()
	winEnd := time.Date(2026, 4, 29, 0, 0, 0, 0, time.UTC).UnixMilli()
	t.Run("RowAtExactStartIsActiveConfig", func(t *testing.T) {
		t.Parallel()
		atStart := makeRow(time.UnixMilli(winStart).UTC(), 90*secondsPerDay, "")
		slices, err := buildRetentionSlicesFromRows(RetentionDomainLogs, []*types.TTLSetting{atStart}, winStart, winEnd)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(slices) != 1 || slices[0].DefaultDays != 90 {
			t.Fatalf("slices = %#v, want one slice at 90 days", slices)
		}
	})
	t.Run("RowAtOrAfterEndIsIgnored", func(t *testing.T) {
		t.Parallel()
		// Only the pre-window row should govern the window; the rows stamped
		// at winEnd and winEnd+1 must have no effect.
		before := makeRow(time.Date(2026, 4, 21, 0, 0, 0, 0, time.UTC), 30*secondsPerDay, "")
		atEnd := makeRow(time.UnixMilli(winEnd).UTC(), 90*secondsPerDay, "")
		pastEnd := makeRow(time.UnixMilli(winEnd+1).UTC(), 365*secondsPerDay, "")
		slices, err := buildRetentionSlicesFromRows(RetentionDomainLogs, []*types.TTLSetting{before, atEnd, pastEnd}, winStart, winEnd)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(slices) != 1 || slices[0].DefaultDays != 30 {
			t.Fatalf("slices = %#v, want one slice at 30 days (preWindow only)", slices)
		}
	})
}
// The Condition column's JSON must round-trip into typed CustomRetentionRule
// values, preserving rule order and multi-value filters.
func TestBuildRetentionSlicesFromRowsParsesConditionJSON(t *testing.T) {
	t.Parallel()
	winStart := time.Date(2026, 4, 28, 0, 0, 0, 0, time.UTC).UnixMilli()
	winEnd := time.Date(2026, 4, 29, 0, 0, 0, 0, time.UTC).UnixMilli()
	conditionJSON := `[{"conditions":[{"key":"signoz.workspace.key.id","values":["019a1769-45aa-721f-a19a-9a8b5ae2d615"]}],"ttlDays":90},{"conditions":[{"key":"signoz.workspace.key.id","values":["019c06d5-bbe2-7e99-b614-ea2a080416ea","019c34a1-9df9-72c0-b100-4f9e38d1f26d"]}],"ttlDays":365}]`
	row := makeRow(time.Date(2026, 4, 24, 15, 13, 15, 0, time.UTC), retentiontypes.DefaultLogsRetentionDays, conditionJSON)
	slices, err := buildRetentionSlicesFromRows(RetentionDomainLogs, []*types.TTLSetting{row}, winStart, winEnd)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(slices) != 1 {
		t.Fatalf("len(slices) = %d, want 1", len(slices))
	}
	expected := []retentiontypes.CustomRetentionRule{
		{Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: []string{"019a1769-45aa-721f-a19a-9a8b5ae2d615"}}}, TTLDays: 90},
		{Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: []string{"019c06d5-bbe2-7e99-b614-ea2a080416ea", "019c34a1-9df9-72c0-b100-4f9e38d1f26d"}}}, TTLDays: 365},
	}
	if !reflect.DeepEqual(slices[0].Rules, expected) {
		t.Fatalf("Rules = %#v, want %#v", slices[0].Rules, expected)
	}
}
// A nil rule set must reduce the expression to just the wrapped default.
func TestBuildRetentionMultiIfSQLNoRulesCollapsesToDefault(t *testing.T) {
	t.Parallel()
	got, err := buildRetentionMultiIfSQL(nil, 15)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got != "toInt32(15)" {
		t.Fatalf("expr = %q, want %q", got, "toInt32(15)")
	}
}
// With two rules, the generated multiIf must list their arms in rule order
// (first match wins) with the default as the final fallback arm.
func TestBuildRetentionMultiIfSQLMultipleRulesPreserveOrder(t *testing.T) {
	t.Parallel()
	input := []retentiontypes.CustomRetentionRule{
		{
			Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: []string{"a"}}},
			TTLDays: 90,
		},
		{
			Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: []string{"b", "c"}}},
			TTLDays: 365,
		},
	}
	got, err := buildRetentionMultiIfSQL(input, 15)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	want := "toInt32(multiIf(JSONExtractString(labels, 'signoz.workspace.key.id') IN ('a'), 90, JSONExtractString(labels, 'signoz.workspace.key.id') IN ('b', 'c'), 365, 15))"
	if got != want {
		t.Fatalf("expr = %q, want %q", got, want)
	}
}
// A rule with two filters must emit both predicates joined by AND.
func TestBuildRetentionMultiIfSQLMultipleFiltersAreAndedTogether(t *testing.T) {
	t.Parallel()
	input := []retentiontypes.CustomRetentionRule{
		{
			Filters: []retentiontypes.FilterCondition{
				{Key: "signoz.workspace.key.id", Values: []string{"a"}},
				{Key: "service.name", Values: []string{"frontend"}},
			},
			TTLDays: 90,
		},
	}
	got, err := buildRetentionMultiIfSQL(input, 15)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !strings.Contains(got, " AND ") {
		t.Fatalf("expr = %q, want filters joined by AND", got)
	}
}
// TestBuildRetentionMultiIfSQLRejectsInvalidInput exercises every validation
// path of buildRetentionMultiIfSQL: injection attempts in keys and values,
// a non-positive default, a non-positive rule TTL, and structurally empty
// rules/filters must all yield an error.
func TestBuildRetentionMultiIfSQLRejectsInvalidInput(t *testing.T) {
	t.Parallel()
	// validRule returns a fresh well-formed rule for cases that corrupt one field.
	validRule := func() retentiontypes.CustomRetentionRule {
		return retentiontypes.CustomRetentionRule{
			Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: []string{"a"}}},
			TTLDays: 90,
		}
	}
	testCases := []struct {
		name        string
		rules       []retentiontypes.CustomRetentionRule
		defaultDays int
	}{
		{
			// A quote in the key would escape the SQL string literal.
			name: "InjectedKey",
			rules: []retentiontypes.CustomRetentionRule{{
				Filters: []retentiontypes.FilterCondition{{Key: "bad'key", Values: []string{"a"}}},
				TTLDays: 90,
			}},
			defaultDays: 15,
		},
		{
			// A quote in a value would escape the IN-list literal.
			name: "InjectedValue",
			rules: []retentiontypes.CustomRetentionRule{{
				Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: []string{"bad'value"}}},
				TTLDays: 90,
			}},
			defaultDays: 15,
		},
		{
			// defaultDays must be strictly positive.
			name:        "ZeroDefault",
			rules:       nil,
			defaultDays: 0,
		},
		{
			// A rule's TTLDays must be strictly positive.
			name: "ZeroRuleTTL",
			rules: []retentiontypes.CustomRetentionRule{func() retentiontypes.CustomRetentionRule {
				rule := validRule()
				rule.TTLDays = 0
				return rule
			}()},
			defaultDays: 15,
		},
		{
			// A rule with no filters has no condition to render.
			name:        "RuleWithNoFilters",
			rules:       []retentiontypes.CustomRetentionRule{{Filters: nil, TTLDays: 90}},
			defaultDays: 15,
		},
		{
			// A filter with no values has no IN-list to render.
			name: "FilterWithNoValues",
			rules: []retentiontypes.CustomRetentionRule{{
				Filters: []retentiontypes.FilterCondition{{Key: "signoz.workspace.key.id", Values: nil}},
				TTLDays: 90,
			}},
			defaultDays: 15,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if _, err := buildRetentionMultiIfSQL(tc.rules, tc.defaultDays); err == nil {
				t.Fatalf("expected error")
			}
		})
	}
}

View File

@@ -0,0 +1,76 @@
package signozmeterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"go.opentelemetry.io/otel/metric"
)
// reporterMetrics bundles the self-observability instruments the meter
// reporter records. Instrument semantics mirror the descriptions registered
// in newReporterMetrics.
type reporterMetrics struct {
	ticks                metric.Int64Counter     // ticks that ran to completion or aborted
	readingsEmitted      metric.Int64Counter     // readings shipped to Zeus
	collectErrors        metric.Int64Counter     // collect errors, tagged with phase={sealed|today}
	postErrors           metric.Int64Counter     // Zeus POST failures, tagged with phase={sealed|today}
	checkpointErrors     metric.Int64Counter     // ticks skipped because GetMeterCheckpoints failed
	catchupDaysProcessed metric.Int64Counter     // sealed days the catch-up loop attempted, tagged with result={success|failure}
	collectDuration      metric.Float64Histogram // per-phase collect duration, seconds
	shipDuration         metric.Float64Histogram // per-phase ship (marshal + POST) duration, seconds
}
// newReporterMetrics registers every reporter instrument on the given meter.
// Registration errors are accumulated with errors.Join so a single call
// surfaces all failing instruments at once; on any error nil is returned.
func newReporterMetrics(meter metric.Meter) (*reporterMetrics, error) {
	var errs error
	// counter registers an Int64Counter, collecting any registration error.
	counter := func(name, description string) metric.Int64Counter {
		instrument, err := meter.Int64Counter(name, metric.WithDescription(description))
		if err != nil {
			errs = errors.Join(errs, err)
		}
		return instrument
	}
	// histogram registers a Float64Histogram in seconds, collecting any error.
	histogram := func(name, description string) metric.Float64Histogram {
		instrument, err := meter.Float64Histogram(name, metric.WithDescription(description), metric.WithUnit("s"))
		if err != nil {
			errs = errors.Join(errs, err)
		}
		return instrument
	}
	metrics := &reporterMetrics{
		ticks:                counter("signoz.meterreporter.ticks", "Total number of meter reporter ticks that ran to completion or aborted."),
		readingsEmitted:      counter("signoz.meterreporter.readings.emitted", "Total number of meter readings shipped to Zeus."),
		collectErrors:        counter("signoz.meterreporter.collect.errors", "Total number of collect errors across organizations and meters, tagged with phase={sealed|today}."),
		postErrors:           counter("signoz.meterreporter.post.errors", "Total number of Zeus POST failures, tagged with phase={sealed|today}."),
		checkpointErrors:     counter("signoz.meterreporter.checkpoint.errors", "Total number of ticks skipped because the Zeus GetMeterCheckpoints call failed."),
		catchupDaysProcessed: counter("signoz.meterreporter.catchup.days_processed", "Total number of sealed (is_completed=true) days the catch-up loop attempted to ship, tagged with result={success|failure}."),
		collectDuration:      histogram("signoz.meterreporter.collect.duration", "Time taken to collect readings from all registered meters in a single phase of a tick, tagged with phase={sealed|today}."),
		shipDuration:         histogram("signoz.meterreporter.ship.duration", "Time taken to ship (marshal + POST) collected readings to Zeus in a single phase of a tick, tagged with phase={sealed|today}."),
	}
	if errs != nil {
		return nil, errs
	}
	return metrics, nil
}

View File

@@ -150,6 +150,72 @@ func (provider *Provider) PutMetersV2(ctx context.Context, key string, data []by
return err
}
// PutMeterReadings POSTs a meter-readings payload to Zeus. A non-empty
// idempotencyKey is forwarded via the X-Idempotency-Key header so Zeus can
// deduplicate retried shipments.
func (provider *Provider) PutMeterReadings(ctx context.Context, key string, idempotencyKey string, data []byte) error {
	extra := http.Header{}
	if idempotencyKey != "" {
		extra.Set("X-Idempotency-Key", idempotencyKey)
	}
	_, err := provider.doWithHeaders(ctx, provider.config.URL.JoinPath("/v2/meters"), http.MethodPost, key, data, extra)
	return err
}
// GetMeterCheckpoints fetches the per-meter shipping checkpoints from Zeus
// and decodes them into typed values. Every entry must carry a name and a
// YYYY-MM-DD checkpoint date; anything else is reported as a malformed
// response.
func (provider *Provider) GetMeterCheckpoints(ctx context.Context, key string) ([]zeustypes.MeterCheckpoint, error) {
	body, err := provider.do(ctx, provider.config.URL.JoinPath("/v2/meters/checkpoints"), http.MethodGet, key, nil)
	if err != nil {
		return nil, err
	}
	raw := gjson.GetBytes(body, "data.checkpoints")
	if !raw.Exists() || raw.Type == gjson.Null {
		return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints are required")
	}
	if !raw.IsArray() {
		return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints must be an array")
	}
	entries := raw.Array()
	out := make([]zeustypes.MeterCheckpoint, 0, len(entries))
	for _, entry := range entries {
		name := entry.Get("name").String()
		if name == "" {
			return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint name is required")
		}
		dateText := entry.Get("checkpoint").String()
		if dateText == "" {
			return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint is required for %q", name)
		}
		parsed, err := time.Parse("2006-01-02", dateText)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, zeus.ErrCodeResponseMalformed, "parse meter checkpoint %q for %q", dateText, name)
		}
		out = append(out, zeustypes.MeterCheckpoint{
			Name:       name,
			Checkpoint: parsed,
		})
	}
	return out, nil
}
func (provider *Provider) PutProfile(ctx context.Context, key string, profile *zeustypes.PostableProfile) error {
body, err := json.Marshal(profile)
if err != nil {
@@ -185,12 +251,21 @@ func (provider *Provider) PutHost(ctx context.Context, key string, host *zeustyp
}
// do issues a request with no extra headers beyond the auth and
// content-type headers that doWithHeaders always sets.
func (provider *Provider) do(ctx context.Context, url *url.URL, method string, key string, requestBody []byte) ([]byte, error) {
	return provider.doWithHeaders(ctx, url, method, key, requestBody, nil)
}
func (provider *Provider) doWithHeaders(ctx context.Context, url *url.URL, method string, key string, requestBody []byte, extraHeaders http.Header) ([]byte, error) {
request, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(requestBody))
if err != nil {
return nil, err
}
request.Header.Set("X-Signoz-Cloud-Api-Key", key)
request.Header.Set("Content-Type", "application/json")
for k, vs := range extraHeaders {
for _, v := range vs {
request.Header.Add(k, v)
}
}
response, err := provider.httpClient.Do(request)
if err != nil {

View File

@@ -51,7 +51,7 @@
"@signozhq/design-tokens": "2.1.4",
"@signozhq/icons": "0.1.0",
"@signozhq/resizable": "0.0.2",
"@signozhq/ui": "0.0.10",
"@signozhq/ui": "0.0.12",
"@tanstack/react-table": "8.21.3",
"@tanstack/react-virtual": "3.13.22",
"@uiw/codemirror-theme-copilot": "4.23.11",

View File

@@ -0,0 +1,399 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import { useMutation, useQuery } from 'react-query';
import type {
InvalidateOptions,
MutationFunction,
QueryClient,
QueryFunction,
QueryKey,
UseMutationOptions,
UseMutationResult,
UseQueryOptions,
UseQueryResult,
} from 'react-query';
import type {
DeleteLLMPricingRulePathParameters,
GetLLMPricingRule200,
GetLLMPricingRulePathParameters,
ListLLMPricingRules200,
ListLLMPricingRulesParams,
LlmpricingruletypesUpdatableLLMPricingRulesDTO,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type { ErrorType, BodyType } from '../../../generatedAPIInstance';
/**
* Returns all LLM pricing rules for the authenticated org, with pagination.
* @summary List pricing rules
*/
export const listLLMPricingRules = (
params?: ListLLMPricingRulesParams,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListLLMPricingRules200>({
url: `/api/v1/llm_pricing_rules`,
method: 'GET',
params,
signal,
});
};
export const getListLLMPricingRulesQueryKey = (
params?: ListLLMPricingRulesParams,
) => {
return [`/api/v1/llm_pricing_rules`, ...(params ? [params] : [])] as const;
};
export const getListLLMPricingRulesQueryOptions = <
TData = Awaited<ReturnType<typeof listLLMPricingRules>>,
TError = ErrorType<RenderErrorResponseDTO>,
>(
params?: ListLLMPricingRulesParams,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof listLLMPricingRules>>,
TError,
TData
>;
},
) => {
const { query: queryOptions } = options ?? {};
const queryKey =
queryOptions?.queryKey ?? getListLLMPricingRulesQueryKey(params);
const queryFn: QueryFunction<
Awaited<ReturnType<typeof listLLMPricingRules>>
> = ({ signal }) => listLLMPricingRules(params, signal);
return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
Awaited<ReturnType<typeof listLLMPricingRules>>,
TError,
TData
> & { queryKey: QueryKey };
};
export type ListLLMPricingRulesQueryResult = NonNullable<
Awaited<ReturnType<typeof listLLMPricingRules>>
>;
export type ListLLMPricingRulesQueryError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List pricing rules
*/
export function useListLLMPricingRules<
TData = Awaited<ReturnType<typeof listLLMPricingRules>>,
TError = ErrorType<RenderErrorResponseDTO>,
>(
params?: ListLLMPricingRulesParams,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof listLLMPricingRules>>,
TError,
TData
>;
},
): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
const queryOptions = getListLLMPricingRulesQueryOptions(params, options);
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
queryKey: QueryKey;
};
query.queryKey = queryOptions.queryKey;
return query;
}
/**
* @summary List pricing rules
*/
export const invalidateListLLMPricingRules = async (
queryClient: QueryClient,
params?: ListLLMPricingRulesParams,
options?: InvalidateOptions,
): Promise<QueryClient> => {
await queryClient.invalidateQueries(
{ queryKey: getListLLMPricingRulesQueryKey(params) },
options,
);
return queryClient;
};
/**
* Single write endpoint used by both the user and the Zeus sync job. Per-rule match is by id, then sourceId, then insert. Override rows (is_override=true) are fully preserved when the request does not provide isOverride; only synced_at is stamped.
* @summary Create or update pricing rules
*/
export const createOrUpdateLLMPricingRules = (
llmpricingruletypesUpdatableLLMPricingRulesDTO: BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO>,
) => {
return GeneratedAPIInstance<void>({
url: `/api/v1/llm_pricing_rules`,
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
data: llmpricingruletypesUpdatableLLMPricingRulesDTO,
});
};
export const getCreateOrUpdateLLMPricingRulesMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof createOrUpdateLLMPricingRules>>,
TError,
{ data: BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof createOrUpdateLLMPricingRules>>,
TError,
{ data: BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO> },
TContext
> => {
const mutationKey = ['createOrUpdateLLMPricingRules'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof createOrUpdateLLMPricingRules>>,
{ data: BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO> }
> = (props) => {
const { data } = props ?? {};
return createOrUpdateLLMPricingRules(data);
};
return { mutationFn, ...mutationOptions };
};
export type CreateOrUpdateLLMPricingRulesMutationResult = NonNullable<
Awaited<ReturnType<typeof createOrUpdateLLMPricingRules>>
>;
export type CreateOrUpdateLLMPricingRulesMutationBody =
BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO>;
export type CreateOrUpdateLLMPricingRulesMutationError =
ErrorType<RenderErrorResponseDTO>;
/**
* @summary Create or update pricing rules
*/
export const useCreateOrUpdateLLMPricingRules = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof createOrUpdateLLMPricingRules>>,
TError,
{ data: BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof createOrUpdateLLMPricingRules>>,
TError,
{ data: BodyType<LlmpricingruletypesUpdatableLLMPricingRulesDTO> },
TContext
> => {
const mutationOptions =
getCreateOrUpdateLLMPricingRulesMutationOptions(options);
return useMutation(mutationOptions);
};
/**
* Hard-deletes a pricing rule. If auto-synced, it will be recreated on the next sync cycle.
* @summary Delete a pricing rule
*/
export const deleteLLMPricingRule = ({
id,
}: DeleteLLMPricingRulePathParameters) => {
return GeneratedAPIInstance<void>({
url: `/api/v1/llm_pricing_rules/${id}`,
method: 'DELETE',
});
};
export const getDeleteLLMPricingRuleMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof deleteLLMPricingRule>>,
TError,
{ pathParams: DeleteLLMPricingRulePathParameters },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof deleteLLMPricingRule>>,
TError,
{ pathParams: DeleteLLMPricingRulePathParameters },
TContext
> => {
const mutationKey = ['deleteLLMPricingRule'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof deleteLLMPricingRule>>,
{ pathParams: DeleteLLMPricingRulePathParameters }
> = (props) => {
const { pathParams } = props ?? {};
return deleteLLMPricingRule(pathParams);
};
return { mutationFn, ...mutationOptions };
};
export type DeleteLLMPricingRuleMutationResult = NonNullable<
Awaited<ReturnType<typeof deleteLLMPricingRule>>
>;
export type DeleteLLMPricingRuleMutationError =
ErrorType<RenderErrorResponseDTO>;
/**
* @summary Delete a pricing rule
*/
export const useDeleteLLMPricingRule = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof deleteLLMPricingRule>>,
TError,
{ pathParams: DeleteLLMPricingRulePathParameters },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof deleteLLMPricingRule>>,
TError,
{ pathParams: DeleteLLMPricingRulePathParameters },
TContext
> => {
const mutationOptions = getDeleteLLMPricingRuleMutationOptions(options);
return useMutation(mutationOptions);
};
/**
* Returns a single LLM pricing rule by ID.
* @summary Get a pricing rule
*/
export const getLLMPricingRule = (
{ id }: GetLLMPricingRulePathParameters,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<GetLLMPricingRule200>({
url: `/api/v1/llm_pricing_rules/${id}`,
method: 'GET',
signal,
});
};
export const getGetLLMPricingRuleQueryKey = ({
id,
}: GetLLMPricingRulePathParameters) => {
return [`/api/v1/llm_pricing_rules/${id}`] as const;
};
export const getGetLLMPricingRuleQueryOptions = <
TData = Awaited<ReturnType<typeof getLLMPricingRule>>,
TError = ErrorType<RenderErrorResponseDTO>,
>(
{ id }: GetLLMPricingRulePathParameters,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof getLLMPricingRule>>,
TError,
TData
>;
},
) => {
const { query: queryOptions } = options ?? {};
const queryKey =
queryOptions?.queryKey ?? getGetLLMPricingRuleQueryKey({ id });
const queryFn: QueryFunction<
Awaited<ReturnType<typeof getLLMPricingRule>>
> = ({ signal }) => getLLMPricingRule({ id }, signal);
return {
queryKey,
queryFn,
enabled: !!id,
...queryOptions,
} as UseQueryOptions<
Awaited<ReturnType<typeof getLLMPricingRule>>,
TError,
TData
> & { queryKey: QueryKey };
};
export type GetLLMPricingRuleQueryResult = NonNullable<
Awaited<ReturnType<typeof getLLMPricingRule>>
>;
export type GetLLMPricingRuleQueryError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Get a pricing rule
*/
export function useGetLLMPricingRule<
TData = Awaited<ReturnType<typeof getLLMPricingRule>>,
TError = ErrorType<RenderErrorResponseDTO>,
>(
{ id }: GetLLMPricingRulePathParameters,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof getLLMPricingRule>>,
TError,
TData
>;
},
): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
const queryOptions = getGetLLMPricingRuleQueryOptions({ id }, options);
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
queryKey: QueryKey;
};
query.queryKey = queryOptions.queryKey;
return query;
}
/**
* @summary Get a pricing rule
*/
export const invalidateGetLLMPricingRule = async (
queryClient: QueryClient,
{ id }: GetLLMPricingRulePathParameters,
options?: InvalidateOptions,
): Promise<QueryClient> => {
await queryClient.invalidateQueries(
{ queryKey: getGetLLMPricingRuleQueryKey({ id }) },
options,
);
return queryClient;
};

View File

@@ -3413,6 +3413,170 @@ export enum InframonitoringtypesResponseTypeDTO {
list = 'list',
grouped_list = 'grouped_list',
}
export interface LlmpricingruletypesGettablePricingRulesDTO {
/**
* @type array
* @nullable true
*/
items: LlmpricingruletypesLLMPricingRuleDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset: number;
/**
* @type integer
*/
total: number;
}
export interface LlmpricingruletypesLLMPricingCacheCostsDTO {
mode: LlmpricingruletypesLLMPricingRuleCacheModeDTO;
/**
* @type number
* @format double
*/
read?: number;
/**
* @type number
* @format double
*/
write?: number;
}
export interface LlmpricingruletypesLLMPricingRuleDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
createdBy?: string;
/**
* @type boolean
*/
enabled: boolean;
/**
* @type string
*/
id: string;
/**
* @type boolean
*/
isOverride: boolean;
/**
* @type string
*/
modelName: string;
modelPattern: LlmpricingruletypesStringSliceDTO;
/**
* @type string
*/
orgId: string;
pricing: LlmpricingruletypesLLMRulePricingDTO;
/**
* @type string
*/
provider: string;
/**
* @type string
*/
sourceId?: string;
/**
* @type string
* @format date-time
* @nullable true
*/
syncedAt?: Date | null;
unit: LlmpricingruletypesLLMPricingRuleUnitDTO;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
/**
* @type string
*/
updatedBy?: string;
}
export enum LlmpricingruletypesLLMPricingRuleCacheModeDTO {
subtract = 'subtract',
additive = 'additive',
unknown = 'unknown',
}
export enum LlmpricingruletypesLLMPricingRuleUnitDTO {
per_million_tokens = 'per_million_tokens',
}
export interface LlmpricingruletypesLLMRulePricingDTO {
cache?: LlmpricingruletypesLLMPricingCacheCostsDTO;
/**
* @type number
* @format double
*/
input: number;
/**
* @type number
* @format double
*/
output: number;
}
/**
* @nullable
*/
export type LlmpricingruletypesStringSliceDTO = string[] | null;
export interface LlmpricingruletypesUpdatableLLMPricingRuleDTO {
/**
* @type boolean
*/
enabled: boolean;
/**
* @type string
* @nullable true
*/
id?: string | null;
/**
* @type boolean
* @nullable true
*/
isOverride?: boolean | null;
/**
* @type string
*/
modelName: string;
/**
* @type array
* @nullable true
*/
modelPattern: string[] | null;
pricing: LlmpricingruletypesLLMRulePricingDTO;
/**
* @type string
*/
provider: string;
/**
* @type string
* @nullable true
*/
sourceId?: string | null;
unit: LlmpricingruletypesLLMPricingRuleUnitDTO;
}
export interface LlmpricingruletypesUpdatableLLMPricingRulesDTO {
/**
* @type array
* @nullable true
*/
rules: LlmpricingruletypesUpdatableLLMPricingRuleDTO[] | null;
}
export interface MetricsexplorertypesInspectMetricsRequestDTO {
/**
* @type integer
@@ -7004,6 +7168,41 @@ export type CreateInvite201 = {
status: string;
};
export type ListLLMPricingRulesParams = {
/**
* @type integer
* @description undefined
*/
offset?: number;
/**
* @type integer
* @description undefined
*/
limit?: number;
};
export type ListLLMPricingRules200 = {
data: LlmpricingruletypesGettablePricingRulesDTO;
/**
* @type string
*/
status: string;
};
export type DeleteLLMPricingRulePathParameters = {
id: string;
};
export type GetLLMPricingRulePathParameters = {
id: string;
};
export type GetLLMPricingRule200 = {
data: LlmpricingruletypesLLMPricingRuleDTO;
/**
* @type string
*/
status: string;
};
export type ListPromotedAndIndexedPaths200 = {
/**
* @type array

View File

@@ -46,6 +46,7 @@ function DeleteMemberDialog({
color="destructive"
disabled={isDeleting}
onClick={onConfirm}
loading={isDeleting}
>
<Trash2 size={12} />
{isDeleting ? 'Processing...' : title}
@@ -63,7 +64,6 @@ function DeleteMemberDialog({
}}
title={title}
width="narrow"
className="alert-dialog delete-dialog"
showCloseButton={false}
disableOutsideClick={false}
footer={footer}

View File

@@ -28,18 +28,6 @@
cursor: default;
}
&__input {
height: 32px;
background: var(--l2-background);
border-color: var(--l1-border);
color: var(--l1-foreground);
box-shadow: none;
&::placeholder {
color: var(--l3-foreground);
}
}
&__input-wrapper {
display: flex;
align-items: center;
@@ -48,7 +36,7 @@
padding: var(--padding-1) var(--padding-2);
border-radius: 2px;
background: var(--l2-background);
border: 1px solid var(--l1-border);
border: 1px solid var(--border);
box-sizing: border-box;
&--disabled {
@@ -65,8 +53,8 @@
}
&__email-text {
font-size: var(--font-size-sm);
font-weight: var(--font-weight-normal);
font-size: var(--paragraph-base-400-font-size);
font-weight: var(--paragraph-base-400-font-weight);
color: var(--foreground);
line-height: var(--line-height-18);
letter-spacing: -0.07px;
@@ -178,36 +166,6 @@
}
}
.delete-dialog {
background: var(--l2-background);
border: 1px solid var(--l1-border);
[data-slot='dialog-title'] {
color: var(--l1-foreground);
}
&__body {
font-size: var(--paragraph-base-400-font-size);
font-weight: var(--paragraph-base-400-font-weight);
color: var(--l2-foreground);
line-height: var(--paragraph-base-400-line-height);
letter-spacing: -0.065px;
margin: 0;
strong {
font-weight: var(--font-weight-medium);
color: var(--l1-foreground);
}
}
&__footer {
display: flex;
justify-content: flex-end;
gap: var(--spacing-4);
margin-top: var(--margin-6);
}
}
.reset-link-dialog {
background: var(--l2-background);
border: 1px solid var(--l1-border);
@@ -264,13 +222,6 @@
}
&__copy-btn {
flex-shrink: 0;
height: 32px;
border-radius: 0 2px 2px 0;
border-top: none;
border-right: none;
border-bottom: none;
border-left: 1px solid var(--l1-border);
min-width: 64px;
}
}

View File

@@ -224,7 +224,7 @@ function EditMemberDrawer({
try {
await rawRetry();
setSaveErrors((prev) => prev.filter((e) => e.context !== context));
refetchUser();
void refetchUser();
} catch (err) {
setSaveErrors((prev) =>
prev.map((e) =>
@@ -250,7 +250,7 @@ function EditMemberDrawer({
});
}
setSaveErrors((prev) => prev.filter((e) => e.context !== 'Name update'));
refetchUser();
void refetchUser();
} catch (err) {
setSaveErrors((prev) =>
prev.map((e) =>
@@ -319,7 +319,7 @@ function EditMemberDrawer({
}),
];
});
refetchUser();
void refetchUser();
},
});
} else {
@@ -340,7 +340,7 @@ function EditMemberDrawer({
onComplete();
}
refetchUser();
void refetchUser();
} finally {
setIsSaving(false);
}
@@ -465,7 +465,6 @@ function EditMemberDrawer({
prev.filter((err) => err.context !== 'Name update'),
);
}}
className="edit-member-drawer__input"
placeholder="Enter name"
disabled={isRootUser || isDeleted}
/>
@@ -631,7 +630,7 @@ function EditMemberDrawer({
</div>
<div className="edit-member-drawer__footer-right">
<Button variant="solid" color="secondary" onClick={handleClose}>
<Button variant="outlined" color="secondary" onClick={handleClose}>
<X size={14} />
Cancel
</Button>
@@ -641,6 +640,7 @@ function EditMemberDrawer({
color="primary"
disabled={!isDirty || isSaving || isRootUser}
onClick={handleSave}
loading={isSaving}
>
{isSaving ? 'Saving...' : 'Save Member Details'}
</Button>

View File

@@ -44,9 +44,8 @@ function ResetLinkDialog({
<span className="reset-link-dialog__link-text">{resetLink}</span>
</div>
<Button
variant="outlined"
variant="link"
color="secondary"
size="sm"
onClick={onCopy}
prefix={hasCopied ? <Check size={12} /> : <Copy size={12} />}
className="reset-link-dialog__copy-btn"

View File

@@ -1,6 +1,6 @@
import { useCallback, useEffect, useMemo, useState } from 'react';
import { Style } from '@signozhq/design-tokens';
import { ChevronDown, CircleAlert, Plus, Trash2, X } from '@signozhq/icons';
import { ChevronDown, Plus, Trash2, X } from '@signozhq/icons';
import {
Button,
Callout,
@@ -294,10 +294,8 @@ function InviteMembersModal({
type="error"
size="small"
showIcon
icon={<CircleAlert size={12} />}
>
{getValidationErrorMessage()}
</Callout>
title={getValidationErrorMessage()}
/>
</div>
)}
</div>

View File

@@ -87,7 +87,7 @@
input {
color: var(--l1-foreground);
font-size: var(--font-size-sm);
font-size: var(--font-size-xs);
}
.ant-picker-suffix {
@@ -126,12 +126,6 @@
}
&__copy-btn {
flex-shrink: 0;
height: 32px;
border-radius: 0 2px 2px 0;
border-top: none;
border-right: none;
border-bottom: none;
border-left: 1px solid var(--l1-border);
min-width: 40px;
}
@@ -152,6 +146,7 @@
color: var(--foreground);
letter-spacing: 0.48px;
text-transform: uppercase;
margin-bottom: var(--spacing-4);
}
&__footer {

View File

@@ -22,9 +22,8 @@ function KeyCreatedPhase({
<div className="add-key-modal__key-display">
<span className="add-key-modal__key-text">{createdKey.key}</span>
<Button
variant="outlined"
variant="link"
color="secondary"
size="sm"
onClick={onCopy}
className="add-key-modal__copy-btn"
>

View File

@@ -106,7 +106,7 @@ function KeyFormPhase({
<div className="add-key-modal__footer">
<div className="add-key-modal__footer-right">
<Button variant="solid" color="secondary" size="sm" onClick={onClose}>
<Button variant="solid" color="secondary" onClick={onClose}>
Cancel
</Button>
<Button
@@ -115,7 +115,6 @@ function KeyFormPhase({
form={FORM_ID}
variant="solid"
color="primary"
size="sm"
loading={isSubmitting}
disabled={!isValid}
>

View File

@@ -136,7 +136,7 @@ function EditKeyForm({
</form>
<div className="edit-key-modal__footer">
<Button variant="ghost" color="destructive" onClick={onRevokeClick}>
<Button variant="link" color="destructive" onClick={onRevokeClick}>
<Trash2 size={12} />
Revoke Key
</Button>

View File

@@ -119,7 +119,7 @@
input {
color: var(--l1-foreground);
font-size: 13px;
font-size: var(--font-size-xs);
}
.ant-picker-suffix {

View File

@@ -20,7 +20,7 @@ import { useErrorModal } from 'providers/ErrorModalProvider';
import { useTimezone } from 'providers/Timezone';
import APIError from 'types/api/error';
import { RevokeKeyContent } from '../RevokeKeyModal';
import { RevokeKeyFooter } from '../RevokeKeyModal';
import EditKeyForm from './EditKeyForm';
import type { FormValues } from './types';
import { DEFAULT_FORM_VALUES, ExpiryMode } from './types';
@@ -158,17 +158,25 @@ function EditKeyModal({ keyItem }: EditKeyModalProps): JSX.Element {
}
width={isRevokeConfirmOpen ? 'narrow' : 'base'}
className={
isRevokeConfirmOpen ? 'alert-dialog delete-dialog' : 'edit-key-modal'
isRevokeConfirmOpen ? 'alert-dialog sa-delete-dialog' : 'edit-key-modal'
}
showCloseButton={!isRevokeConfirmOpen}
disableOutsideClick={isErrorModalVisible}
footer={
isRevokeConfirmOpen ? (
<RevokeKeyFooter
isRevoking={isRevoking}
onCancel={(): void => setIsRevokeConfirmOpen(false)}
onConfirm={handleRevoke}
/>
) : undefined
}
>
{isRevokeConfirmOpen ? (
<RevokeKeyContent
isRevoking={isRevoking}
onCancel={(): void => setIsRevokeConfirmOpen(false)}
onConfirm={handleRevoke}
/>
<>
Revoking this key will permanently invalidate it. Any systems using this
key will lose access immediately.
</>
) : (
<EditKeyForm
register={register}

View File

@@ -72,7 +72,6 @@ function OverviewTab({
id="sa-name"
value={localName}
onChange={(e): void => onNameChange(e.target.value)}
className="sa-drawer__input"
placeholder="Enter name"
/>
)}

View File

@@ -17,39 +17,32 @@ import { parseAsString, useQueryState } from 'nuqs';
import { useErrorModal } from 'providers/ErrorModalProvider';
import APIError from 'types/api/error';
export interface RevokeKeyContentProps {
export interface RevokeKeyFooterProps {
isRevoking: boolean;
onCancel: () => void;
onConfirm: () => void;
}
export function RevokeKeyContent({
export function RevokeKeyFooter({
isRevoking,
onCancel,
onConfirm,
}: RevokeKeyContentProps): JSX.Element {
}: RevokeKeyFooterProps): JSX.Element {
return (
<>
<p className="delete-dialog__body">
Revoking this key will permanently invalidate it. Any systems using this key
will lose access immediately.
</p>
<div className="delete-dialog__footer">
<Button variant="solid" color="secondary" size="sm" onClick={onCancel}>
<X size={12} />
Cancel
</Button>
<Button
variant="solid"
color="destructive"
size="sm"
loading={isRevoking}
onClick={onConfirm}
>
<Trash2 size={12} />
Revoke Key
</Button>
</div>
<Button variant="solid" color="secondary" onClick={onCancel}>
<X size={12} />
Cancel
</Button>
<Button
variant="solid"
color="destructive"
loading={isRevoking}
onClick={onConfirm}
>
<Trash2 size={12} />
Revoke Key
</Button>
</>
);
}
@@ -112,15 +105,19 @@ function RevokeKeyModal(): JSX.Element {
}}
title={`Revoke ${keyName ?? 'key'}?`}
width="narrow"
className="alert-dialog delete-dialog"
className="alert-dialog sa-delete-dialog"
showCloseButton={false}
disableOutsideClick={isErrorModalVisible}
footer={
<RevokeKeyFooter
isRevoking={isRevoking}
onCancel={handleCancel}
onConfirm={handleConfirm}
/>
}
>
<RevokeKeyContent
isRevoking={isRevoking}
onCancel={handleCancel}
onConfirm={handleConfirm}
/>
Revoking this key will permanently invalidate it. Any systems using this key
will lose access immediately.
</DialogWrapper>
);
}

View File

@@ -57,6 +57,8 @@
color: var(--l1-foreground);
}
}
min-width: 220px;
}
&__tab {
@@ -166,18 +168,6 @@
cursor: default;
}
&__input {
height: 32px;
background: var(--l2-background);
border-color: var(--l1-border);
color: var(--l1-foreground);
box-shadow: none;
&::placeholder {
color: var(--l3-foreground);
}
}
&__input-wrapper {
display: flex;
align-items: center;
@@ -186,7 +176,7 @@
padding: 0 var(--padding-2);
border-radius: 2px;
background: var(--l2-background);
border: 1px solid var(--l1-border);
border: 1px solid var(--border);
&--disabled {
cursor: not-allowed;
@@ -195,8 +185,8 @@
}
&__input-text {
font-size: var(--font-size-sm);
font-weight: var(--font-weight-normal);
font-size: var(--paragraph-base-400-font-size);
font-weight: var(--paragraph-base-400-font-weight);
color: var(--foreground);
line-height: var(--line-height-18);
letter-spacing: -0.07px;

View File

@@ -129,7 +129,7 @@ function ServiceAccountDrawer({
useEffect(() => {
if (account?.id) {
setLocalName(account?.name ?? '');
setKeysPage(1);
void setKeysPage(1);
}
}, [account?.id, account?.name, setKeysPage]);
@@ -176,7 +176,7 @@ function ServiceAccountDrawer({
}
const maxPage = Math.max(1, Math.ceil(keys.length / PAGE_SIZE));
if (keysPage > maxPage) {
setKeysPage(maxPage);
void setKeysPage(maxPage);
}
}, [keysLoading, keys.length, keysPage, setKeysPage]);
@@ -214,8 +214,8 @@ function ServiceAccountDrawer({
data: { name: localName },
});
setSaveErrors((prev) => prev.filter((e) => e.context !== 'Name update'));
refetchAccount();
queryClient.invalidateQueries(getListServiceAccountsQueryKey());
void refetchAccount();
void queryClient.invalidateQueries(getListServiceAccountsQueryKey());
} catch (err) {
setSaveErrors((prev) =>
prev.map((e) =>
@@ -337,8 +337,8 @@ function ServiceAccountDrawer({
onSuccess({ closeDrawer: false });
}
refetchAccount();
queryClient.invalidateQueries(getListServiceAccountsQueryKey());
void refetchAccount();
void queryClient.invalidateQueries(getListServiceAccountsQueryKey());
} finally {
setIsSaving(false);
}
@@ -357,12 +357,12 @@ function ServiceAccountDrawer({
]);
const handleClose = useCallback((): void => {
setIsDeleteOpen(null);
setIsAddKeyOpen(null);
setSelectedAccountId(null);
setActiveTab(null);
setKeysPage(null);
setEditKeyId(null);
void setIsDeleteOpen(null);
void setIsAddKeyOpen(null);
void setSelectedAccountId(null);
void setActiveTab(null);
void setKeysPage(null);
void setEditKeyId(null);
setSaveErrors([]);
}, [
setSelectedAccountId,
@@ -379,12 +379,13 @@ function ServiceAccountDrawer({
<ToggleGroup
type="single"
value={activeTab}
size="sm"
onChange={(val): void => {
if (val) {
setActiveTab(val as ServiceAccountDrawerTab);
void setActiveTab(val as ServiceAccountDrawerTab);
if (val !== ServiceAccountDrawerTab.Keys) {
setKeysPage(null);
setEditKeyId(null);
void setKeysPage(null);
void setEditKeyId(null);
}
}
}}
@@ -415,7 +416,7 @@ function ServiceAccountDrawer({
color="secondary"
disabled={isDeleted}
onClick={(): void => {
setIsAddKeyOpen(true);
void setIsAddKeyOpen(true);
}}
>
<Plus size={12} />
@@ -503,7 +504,7 @@ function ServiceAccountDrawer({
variant="link"
color="destructive"
onClick={(): void => {
setIsDeleteOpen(true);
void setIsDeleteOpen(true);
}}
>
<Trash2 size={12} />
@@ -512,7 +513,7 @@ function ServiceAccountDrawer({
)}
{!isDeleted && (
<div className="sa-drawer__footer-right">
<Button variant="solid" color="secondary" onClick={handleClose}>
<Button variant="outlined" color="secondary" onClick={handleClose}>
<X size={14} />
Cancel
</Button>

View File

@@ -78,6 +78,7 @@
display: flex;
align-items: center;
gap: var(--spacing-10);
padding-left: 18px;
}
.custom-domain-card-meta-row.workspace-name-hidden {
@@ -124,30 +125,6 @@
}
}
.workspace-url-trigger {
display: inline-flex;
align-items: center;
gap: var(--spacing-3);
background: none;
border: none;
padding: 0;
cursor: pointer;
color: var(--l1-foreground);
font-size: var(--font-size-xs);
line-height: var(--line-height-18);
letter-spacing: -0.06px;
&:disabled {
opacity: 0.5;
cursor: not-allowed;
}
svg {
flex-shrink: 0;
color: var(--l2-foreground);
}
}
.workspace-url-dropdown {
border-radius: 4px;
border: 1px solid var(--l1-border);

View File

@@ -204,6 +204,7 @@ export default function CustomDomainSettings(): JSX.Element {
>
<Dropdown
trigger={['click']}
disabled={isFetchingHosts}
dropdownRender={(): JSX.Element => (
<div className="workspace-url-dropdown">
<span className="workspace-url-dropdown-header">
@@ -239,12 +240,7 @@ export default function CustomDomainSettings(): JSX.Element {
</div>
)}
>
<Button
className="workspace-url-trigger"
disabled={isFetchingHosts}
variant="link"
color="none"
>
<Button variant="link" color="none">
<Link2 size={12} />
<span>{stripProtocol(activeHost?.url ?? '')}</span>
<ChevronDown size={12} />

View File

@@ -89,25 +89,4 @@
) !important;
}
}
&__add-btn {
width: 100%;
// Ensure icon is visible
svg,
[class*='icon'] {
color: var(--l2-foreground) !important;
display: inline-block !important;
opacity: 1 !important;
}
&:hover {
color: var(--l1-foreground);
svg,
[class*='icon'] {
color: var(--l1-foreground) !important;
}
}
}
}

View File

@@ -69,10 +69,10 @@ function DomainMappingList({
))}
<Button
variant="dashed"
variant="outlined"
color="secondary"
onClick={(): void => add({ domain: '', adminEmail: '' })}
prefix={<Plus size={14} />}
className="domain-mapping-list__add-btn"
>
Add Domain Mapping
</Button>

View File

@@ -51,35 +51,6 @@
border-radius: 2px;
}
}
// todo: https://github.com/SigNoz/components/issues/116
.roles-search-wrapper {
flex: 1;
input {
width: 100%;
background: var(--l3-background);
border: 1px solid var(--l1-border);
border-radius: 2px;
padding: 6px 6px 6px 8px;
font-family: Inter;
font-size: 14px;
font-weight: 400;
line-height: 18px;
letter-spacing: -0.07px;
color: var(--l1-foreground);
outline: none;
height: 32px;
&::placeholder {
color: var(--l3-foreground);
}
&:focus {
border-color: var(--input);
}
}
}
}
.roles-description-tooltip {

View File

@@ -22,14 +22,12 @@ function RolesSettings(): JSX.Element {
</div>
<div className="roles-settings-content">
<div className="roles-settings-toolbar">
<div className="roles-search-wrapper">
<Input
type="search"
placeholder="Search for roles..."
value={searchQuery}
onChange={(e): void => setSearchQuery(e.target.value)}
/>
</div>
<Input
type="search"
placeholder="Search for roles..."
value={searchQuery}
onChange={(e): void => setSearchQuery(e.target.value)}
/>
{IS_ROLE_DETAILS_AND_CRUD_ENABLED && (
<Button
variant="solid"

View File

@@ -720,6 +720,10 @@ notifications - 2050
animation: spin 1s linear infinite;
}
.animate-fast-spin {
animation: spin 0.5s linear infinite;
}
// Custom legend tooltip for immediate display
.legend-tooltip {
position: fixed;

View File

@@ -5586,10 +5586,10 @@
tailwind-merge "^2.5.2"
tailwindcss-animate "^1.0.7"
"@signozhq/ui@0.0.10":
version "0.0.10"
resolved "https://registry.yarnpkg.com/@signozhq/ui/-/ui-0.0.10.tgz#cdbab838f8cb543cf5b483a86e9d9b65265b81ff"
integrity sha512-XLeET+PgSP7heqKMsb9YZOSRT3TpfMPHNQRnY1I4SK8mXSct7BYWwK0Q3Je0uf4Z3aWOcpRYoRUPHWZQBpweFQ==
"@signozhq/ui@0.0.12":
version "0.0.12"
resolved "https://registry.yarnpkg.com/@signozhq/ui/-/ui-0.0.12.tgz#b623c1729a0d85532d555fe7e756f3a4207e8e5d"
integrity sha512-69XS/j9R+uTNMdupyjki/WK1j0d5K5j0/pJrINGiteQRRrPg/AOMue7v/W6dkLICRhXcz/mgI6tLeT2FAuzKFw==
dependencies:
"@chenglou/pretext" "^0.0.5"
"@radix-ui/react-checkbox" "^1.2.3"
@@ -5611,7 +5611,7 @@
clsx "^2.1.1"
cmdk "^1.1.1"
dayjs "^1.11.10"
lodash-es "^4.17.21"
lodash-es "^4.18.1"
motion "^11.11.17"
next-themes "^0.4.6"
nuqs "^2.8.9"
@@ -13291,6 +13291,11 @@ lodash-es@4, lodash-es@^4.17.21:
resolved "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz"
integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==
lodash-es@^4.18.1:
version "4.18.1"
resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.18.1.tgz#b962eeb80d9d983a900bf342961fb7418ca10b1d"
integrity sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==
lodash.camelcase@^4.3.0:
version "4.3.0"
resolved "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz"

View File

@@ -0,0 +1,93 @@
package signozapiserver
import (
"net/http"
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/llmpricingruletypes"
"github.com/gorilla/mux"
)
// addLLMPricingRuleRoutes registers the /api/v1/llm_pricing_rules endpoints:
// list and get are viewer-gated reads; create-or-update and delete are
// admin-gated writes. Returns the first registration error, if any.
func (provider *provider) addLLMPricingRuleRoutes(router *mux.Router) error {
	if err := router.Handle("/api/v1/llm_pricing_rules", handler.New(
		provider.authZ.ViewAccess(provider.llmPricingRuleHandler.List),
		handler.OpenAPIDef{
			ID:                  "ListLLMPricingRules",
			Tags:                []string{"llmpricingrules"},
			Summary:             "List pricing rules",
			Description:         "Returns all LLM pricing rules for the authenticated org, with pagination.",
			Request:             nil,
			RequestContentType:  "",
			RequestQuery:        new(llmpricingruletypes.ListPricingRulesQuery),
			Response:            new(llmpricingruletypes.GettablePricingRules),
			ResponseContentType: "application/json",
			SuccessStatusCode:   http.StatusOK,
			ErrorStatusCodes:    []int{http.StatusBadRequest},
			Deprecated:          false,
			SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
		},
	)).Methods(http.MethodGet).GetError(); err != nil {
		return err
	}

	if err := router.Handle("/api/v1/llm_pricing_rules", handler.New(
		provider.authZ.AdminAccess(provider.llmPricingRuleHandler.CreateOrUpdate),
		handler.OpenAPIDef{
			ID:                 "CreateOrUpdateLLMPricingRules",
			Tags:               []string{"llmpricingrules"},
			Summary:            "Create or update pricing rules",
			Description:        "Single write endpoint used by both the user and the Zeus sync job. Per-rule match is by id, then sourceId, then insert. Override rows (is_override=true) are fully preserved when the request does not provide isOverride; only synced_at is stamped.",
			Request:            new(llmpricingruletypes.UpdatableLLMPricingRules),
			RequestContentType: "application/json",
			// 204 carries no body; zero the response fields explicitly for
			// consistency with the delete route below.
			Response:            nil,
			ResponseContentType: "",
			SuccessStatusCode:   http.StatusNoContent,
			ErrorStatusCodes:    []int{http.StatusBadRequest},
			Deprecated:          false,
			SecuritySchemes:     newSecuritySchemes(types.RoleAdmin),
		},
	)).Methods(http.MethodPut).GetError(); err != nil {
		return err
	}

	if err := router.Handle("/api/v1/llm_pricing_rules/{id}", handler.New(
		provider.authZ.ViewAccess(provider.llmPricingRuleHandler.Get),
		handler.OpenAPIDef{
			ID:                  "GetLLMPricingRule",
			Tags:                []string{"llmpricingrules"},
			Summary:             "Get a pricing rule",
			Description:         "Returns a single LLM pricing rule by ID.",
			Request:             nil,
			RequestContentType:  "",
			Response:            new(llmpricingruletypes.GettableLLMPricingRule),
			ResponseContentType: "application/json",
			SuccessStatusCode:   http.StatusOK,
			ErrorStatusCodes:    []int{http.StatusNotFound},
			Deprecated:          false,
			SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
		},
	)).Methods(http.MethodGet).GetError(); err != nil {
		return err
	}

	if err := router.Handle("/api/v1/llm_pricing_rules/{id}", handler.New(
		provider.authZ.AdminAccess(provider.llmPricingRuleHandler.Delete),
		handler.OpenAPIDef{
			ID:                  "DeleteLLMPricingRule",
			Tags:                []string{"llmpricingrules"},
			Summary:             "Delete a pricing rule",
			Description:         "Hard-deletes a pricing rule. If auto-synced, it will be recreated on the next sync cycle.",
			Request:             nil,
			RequestContentType:  "",
			Response:            nil,
			ResponseContentType: "",
			SuccessStatusCode:   http.StatusNoContent,
			ErrorStatusCodes:    []int{http.StatusNotFound},
			Deprecated:          false,
			SecuritySchemes:     newSecuritySchemes(types.RoleAdmin),
		},
	)).Methods(http.MethodDelete).GetError(); err != nil {
		return err
	}

	return nil
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/llmpricingrule"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/preference"
@@ -67,6 +68,7 @@ type provider struct {
alertmanagerHandler alertmanager.Handler
traceDetailHandler tracedetail.Handler
rulerHandler ruler.Handler
llmPricingRuleHandler llmpricingrule.Handler
}
func NewFactory(
@@ -96,6 +98,7 @@ func NewFactory(
ruleStateHistoryHandler rulestatehistory.Handler,
spanMapperHandler spanmapper.Handler,
alertmanagerHandler alertmanager.Handler,
llmPricingRuleHandler llmpricingrule.Handler,
traceDetailHandler tracedetail.Handler,
rulerHandler ruler.Handler,
) factory.ProviderFactory[apiserver.APIServer, apiserver.Config] {
@@ -130,6 +133,7 @@ func NewFactory(
ruleStateHistoryHandler,
spanMapperHandler,
alertmanagerHandler,
llmPricingRuleHandler,
traceDetailHandler,
rulerHandler,
)
@@ -166,6 +170,7 @@ func newProvider(
ruleStateHistoryHandler rulestatehistory.Handler,
spanMapperHandler spanmapper.Handler,
alertmanagerHandler alertmanager.Handler,
llmPricingRuleHandler llmpricingrule.Handler,
traceDetailHandler tracedetail.Handler,
rulerHandler ruler.Handler,
) (apiserver.APIServer, error) {
@@ -202,6 +207,7 @@ func newProvider(
alertmanagerHandler: alertmanagerHandler,
traceDetailHandler: traceDetailHandler,
rulerHandler: rulerHandler,
llmPricingRuleHandler: llmPricingRuleHandler,
}
provider.authZ = middleware.NewAuthZ(settings.Logger(), orgGetter, authz)
@@ -314,6 +320,10 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
return err
}
if err := provider.addLLMPricingRuleRoutes(router); err != nil {
return err
}
if err := provider.addTraceDetailRoutes(router); err != nil {
return err
}

View File

@@ -0,0 +1,66 @@
package meterreporter
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
// Compile-time assertion that Config implements factory.Config (Validate).
var _ factory.Config = (*Config)(nil)
// Config is the "meterreporter" configuration section, bound via
// mapstructure tags; defaults come from newConfig and invariants are
// enforced by Validate.
type Config struct {
	// Provider picks the reporter implementation. "noop" is the default and is
	// what community builds ship; "signoz" is the enterprise cron-based reporter.
	Provider string `mapstructure:"provider"`
	// Interval is how often the reporter ticks (collect + ship). The validator
	// enforces a 5m floor — any sooner and we'd hammer ClickHouse for nothing,
	// since Zeus UPSERTs inside a UTC day anyway.
	Interval time.Duration `mapstructure:"interval"`
	// Timeout bounds a single tick (collect + marshal + POST). Must be strictly
	// less than Interval so a slow tick can't overlap the next one. Catch-up
	// ticks can issue up to CatchupMaxDaysPerTick day-scoped POSTs back-to-back,
	// so the default is sized to cover that.
	Timeout time.Duration `mapstructure:"timeout"`
	// CatchupMaxDaysPerTick caps how many sealed (is_completed=true) days the
	// orchestrator processes per tick, bounding Zeus POST blast radius. At the
	// default 30/tick and a 6h Interval, a full 12-month bootstrap catch-up
	// converges in roughly 3 days.
	CatchupMaxDaysPerTick int `mapstructure:"catchup_max_days_per_tick"`
}
// newConfig returns the default meter reporter configuration: the no-op
// provider, a 6h tick interval, a 5m per-tick timeout, and a catch-up cap
// of 30 sealed days per tick. All values satisfy Validate.
func newConfig() factory.Config {
	var cfg Config
	cfg.Provider = "noop"
	cfg.Interval = 6 * time.Hour
	cfg.Timeout = 5 * time.Minute
	cfg.CatchupMaxDaysPerTick = 30
	return cfg
}
// NewConfigFactory wires the "meterreporter" config section name to the
// newConfig defaults.
func NewConfigFactory() factory.ConfigFactory {
	name := factory.MustNewName("meterreporter")
	return factory.NewConfigFactory(name, newConfig)
}
// Validate enforces the documented floors (5m interval, 3m timeout), the
// timeout-strictly-less-than-interval invariant, and the 1..60 bound on the
// catch-up cap. Returns nil when the configuration is usable.
func (c Config) Validate() error {
	switch {
	case c.Interval < 5*time.Minute:
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::interval must be at least 5m")
	case c.Timeout < 3*time.Minute:
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::timeout must be at least 3m")
	case c.Timeout >= c.Interval:
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::timeout must be less than meterreporter::interval")
	case c.CatchupMaxDaysPerTick < 1 || c.CatchupMaxDaysPerTick > 60:
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::catchup_max_days_per_tick must be between 1 and 60")
	default:
		return nil
	}
}

View File

@@ -0,0 +1,14 @@
package meterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
var (
	// ErrCodeInvalidInput marks validation failures in the meterreporter
	// config section (see Config.Validate).
	ErrCodeInvalidInput = errors.MustNewCode("meterreporter_invalid_input")
)
// Reporter is the usage meter reporting service. It adds no methods of its
// own: implementations only expose the factory service lifecycle
// (Start/Stop) plus a Healthy readiness channel.
type Reporter interface {
	factory.ServiceWithHealthy
}

View File

@@ -0,0 +1,39 @@
package noopmeterreporter
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/meterreporter"
)
// provider is the no-op meter reporter: it ships no data and simply blocks
// in Start until Stop is called.
type provider struct {
	// healthyC is closed by Start to signal readiness.
	healthyC chan struct{}
	// stopC is closed by Stop to unblock Start.
	stopC chan struct{}
}
// NewFactory registers the "noop" meter reporter provider with the factory
// machinery.
func NewFactory() factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
	return factory.NewProviderFactory(factory.MustNewName("noop"), New)
}
// New builds the no-op reporter; the context, settings, and config are all
// ignored. Never returns an error.
func New(_ context.Context, _ factory.ProviderSettings, _ meterreporter.Config) (meterreporter.Reporter, error) {
	p := &provider{
		healthyC: make(chan struct{}),
		stopC:    make(chan struct{}),
	}
	return p, nil
}
// Start marks the reporter healthy (by closing healthyC) and then blocks
// until Stop closes stopC.
// NOTE(review): a second Start call would panic on the double close of
// healthyC — assumes the service runner invokes Start exactly once; confirm.
func (p *provider) Start(_ context.Context) error {
	close(p.healthyC)
	<-p.stopC
	return nil
}
// Stop unblocks Start by closing stopC.
// NOTE(review): not idempotent — a second Stop call panics on the double
// close; assumes the runner calls Stop at most once. Confirm or guard with
// sync.Once if that assumption does not hold.
func (p *provider) Stop(_ context.Context) error {
	close(p.stopC)
	return nil
}
// Healthy returns a channel that is closed once Start has run, signalling
// readiness to health checks.
func (p *provider) Healthy() <-chan struct{} {
	return p.healthyC
}

View File

@@ -0,0 +1,158 @@
package impllmpricingrule
import (
"context"
"net/http"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/http/binding"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/modules/llmpricingrule"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/llmpricingruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
)
// maxLimit is the largest page size List will honor; larger requested
// limits are silently clamped, not rejected.
const maxLimit = 100
// handler implements llmpricingrule.Handler on top of the module layer.
type handler struct {
	module           llmpricingrule.Module
	providerSettings factory.ProviderSettings
}
// NewHandler returns the HTTP handler for the LLM pricing rule endpoints,
// delegating all business logic to the given module.
func NewHandler(module llmpricingrule.Module, providerSettings factory.ProviderSettings) llmpricingrule.Handler {
	return &handler{module: module, providerSettings: providerSettings}
}
// List handles GET /api/v1/llm_pricing_rules.
// Pagination: limit defaults to 20 and is clamped to maxLimit; a negative
// offset is rejected with an invalid-input error (note the asymmetry: bad
// limits are clamped, bad offsets are errors).
func (h *handler) List(rw http.ResponseWriter, r *http.Request) {
	// Bound the whole request (auth + bind + store read) to 10s.
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()
	claims, err := authtypes.ClaimsFromContext(ctx)
	if err != nil {
		render.Error(rw, err)
		return
	}
	// Scope the listing to the caller's org from the auth claims.
	orgID := valuer.MustNewUUID(claims.OrgID)
	var q llmpricingruletypes.ListPricingRulesQuery
	if err := binding.Query.BindQuery(r.URL.Query(), &q); err != nil {
		render.Error(rw, err)
		return
	}
	// Normalize the limit before validating the offset so the response is
	// deterministic regardless of which parameter was bad.
	if q.Limit <= 0 {
		q.Limit = 20
	} else if q.Limit > maxLimit {
		q.Limit = maxLimit
	}
	if q.Offset < 0 {
		render.Error(rw, errors.Newf(errors.TypeInvalidInput, llmpricingruletypes.ErrCodePricingRuleInvalidInput, "offset must be a non-negative integer"))
		return
	}
	rules, total, err := h.module.List(ctx, orgID, q.Offset, q.Limit)
	if err != nil {
		render.Error(rw, err)
		return
	}
	// Echo the effective offset/limit back alongside the page and total.
	render.Success(rw, http.StatusOK, llmpricingruletypes.NewGettableLLMPricingRulesFromLLMPricingRules(rules, total, q.Offset, q.Limit))
}
// Get handles GET /api/v1/llm_pricing_rules/{id}: it resolves the caller's
// org from the auth claims, parses the {id} path variable, and renders the
// matching rule (the module surfaces not-found as an error).
func (h *handler) Get(rw http.ResponseWriter, r *http.Request) {
	// Cap the request at 10s so a slow store cannot hold the connection.
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()

	claims, claimsErr := authtypes.ClaimsFromContext(ctx)
	if claimsErr != nil {
		render.Error(rw, claimsErr)
		return
	}
	orgID := valuer.MustNewUUID(claims.OrgID)

	id, idErr := ruleIDFromPath(r)
	if idErr != nil {
		render.Error(rw, idErr)
		return
	}

	rule, getErr := h.module.Get(ctx, orgID, id)
	if getErr != nil {
		render.Error(rw, getErr)
		return
	}
	render.Success(rw, http.StatusOK, rule)
}
// CreateOrUpdate handles PUT /api/v1/llm_pricing_rules: it binds the JSON
// body and hands the rules to the module's upsert, recording the caller's
// email. Responds 204 on success.
func (h *handler) CreateOrUpdate(rw http.ResponseWriter, r *http.Request) {
	// Writes get a longer budget (30s) than the read endpoints: one request
	// may carry many rules.
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()

	claims, claimsErr := authtypes.ClaimsFromContext(ctx)
	if claimsErr != nil {
		render.Error(rw, claimsErr)
		return
	}
	orgID := valuer.MustNewUUID(claims.OrgID)

	var req llmpricingruletypes.UpdatableLLMPricingRules
	if bindErr := binding.JSON.BindBody(r.Body, &req); bindErr != nil {
		render.Error(rw, bindErr)
		return
	}

	if upsertErr := h.module.CreateOrUpdate(ctx, orgID, claims.Email, req.Rules); upsertErr != nil {
		render.Error(rw, upsertErr)
		return
	}
	render.Success(rw, http.StatusNoContent, nil)
}
// Delete handles DELETE /api/v1/llm_pricing_rules/{id}: it hard-deletes the
// rule identified by the path variable, scoped to the caller's org.
// Responds 204 on success.
func (h *handler) Delete(rw http.ResponseWriter, r *http.Request) {
	// Cap the request at 10s, matching the other single-rule endpoints.
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()

	claims, claimsErr := authtypes.ClaimsFromContext(ctx)
	if claimsErr != nil {
		render.Error(rw, claimsErr)
		return
	}
	orgID := valuer.MustNewUUID(claims.OrgID)

	id, idErr := ruleIDFromPath(r)
	if idErr != nil {
		render.Error(rw, idErr)
		return
	}

	if delErr := h.module.Delete(ctx, orgID, id); delErr != nil {
		render.Error(rw, delErr)
		return
	}
	render.Success(rw, http.StatusNoContent, nil)
}
// ruleIDFromPath extracts the {id} path variable and validates it as a
// UUID, wrapping parse failures as invalid-input errors.
func ruleIDFromPath(r *http.Request) (valuer.UUID, error) {
	id, err := valuer.NewUUID(mux.Vars(r)["id"])
	if err != nil {
		return valuer.UUID{}, errors.Wrapf(err, errors.TypeInvalidInput, llmpricingruletypes.ErrCodePricingRuleInvalidInput, "id is not a valid uuid")
	}
	return id, nil
}

View File

@@ -0,0 +1,24 @@
package llmpricingrule
import (
"context"
"net/http"
"github.com/SigNoz/signoz/pkg/types/llmpricingruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// Module is the business layer for LLM pricing rules; every operation is
// scoped to the caller's org.
type Module interface {
	// List returns one page of rules plus the total count for the org.
	List(ctx context.Context, orgID valuer.UUID, offset, limit int) ([]*llmpricingruletypes.LLMPricingRule, int, error)
	// Get returns a single rule by id.
	Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*llmpricingruletypes.LLMPricingRule, error)
	// CreateOrUpdate applies the given rules as creates or updates;
	// userEmail records who made the change.
	CreateOrUpdate(ctx context.Context, orgID valuer.UUID, userEmail string, rules []llmpricingruletypes.UpdatableLLMPricingRule) (err error)
	// Delete removes the rule with the given id.
	Delete(ctx context.Context, orgID, id valuer.UUID) error
}
// Handler defines the HTTP handler interface for pricing rule endpoints.
// Each method maps to one route under /api/v1/llm_pricing_rules.
type Handler interface {
	// List handles GET /api/v1/llm_pricing_rules.
	List(rw http.ResponseWriter, r *http.Request)
	// Get handles GET /api/v1/llm_pricing_rules/{id}.
	Get(rw http.ResponseWriter, r *http.Request)
	// CreateOrUpdate handles PUT /api/v1/llm_pricing_rules.
	CreateOrUpdate(rw http.ResponseWriter, r *http.Request)
	// Delete handles DELETE /api/v1/llm_pricing_rules/{id}.
	Delete(rw http.ResponseWriter, r *http.Request)
}

View File

@@ -24,6 +24,7 @@ import (
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
@@ -1377,7 +1378,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending {
if statusItem.Status == types.TTLSettingStatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -1436,7 +1437,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: constants.StatusPending,
Status: types.TTLSettingStatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1462,7 +1463,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusFailed).
Set("status = ?", types.TTLSettingStatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1482,7 +1483,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusFailed).
Set("status = ?", types.TTLSettingStatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1497,7 +1498,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusSuccess).
Set("status = ?", types.TTLSettingStatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1540,7 +1541,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending {
if statusItem.Status == types.TTLSettingStatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -1574,7 +1575,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: constants.StatusPending,
Status: types.TTLSettingStatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1612,7 +1613,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusFailed).
Set("status = ?", types.TTLSettingStatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1633,7 +1634,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusFailed).
Set("status = ?", types.TTLSettingStatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1648,7 +1649,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusSuccess).
Set("status = ?", types.TTLSettingStatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1764,7 +1765,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
if apiErr != nil {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
}
if statusItem.Status == constants.StatusPending {
if statusItem.Status == types.TTLSettingStatusPending {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL is already running")
}
}
@@ -1850,7 +1851,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
TableName: tableName,
TTL: params.DefaultTTLDays,
Condition: string(ttlConditionsJSON),
Status: constants.StatusPending,
Status: types.TTLSettingStatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1866,7 +1867,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, types.TTLSettingStatusFailed)
return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
}
}
@@ -1875,12 +1876,12 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
r.logger.Debug("Executing custom retention TTL request: ", "request", query, "step", i+1)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting custom retention ttl", errorsV2.Attr(err))
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, types.TTLSettingStatusFailed)
return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
}
}
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusSuccess)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, types.TTLSettingStatusSuccess)
}
return &model.CustomRetentionTTLResponse{
@@ -1889,7 +1890,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
}
// New method to build multiIf expressions with support for multiple AND conditions
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []model.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []retentiontypes.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
var conditions []string
for i, rule := range ttlConditions {
@@ -1993,19 +1994,19 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
if err == sql.ErrNoRows {
// No V2 configuration found, return defaults
response.DefaultTTLDays = 15
response.TTLConditions = []model.CustomRetentionRule{}
response.Status = constants.StatusSuccess
response.DefaultTTLDays = retentiontypes.DefaultLogsRetentionDays
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
response.Status = types.TTLSettingStatusSuccess
response.ColdStorageTTLDays = -1
return response, nil
}
// Parse TTL conditions from Condition
var ttlConditions []model.CustomRetentionRule
var ttlConditions []retentiontypes.CustomRetentionRule
if customTTL.Condition != "" {
if err := json.Unmarshal([]byte(customTTL.Condition), &ttlConditions); err != nil {
r.logger.Error("Error parsing TTL conditions", errorsV2.Attr(err))
ttlConditions = []model.CustomRetentionRule{}
ttlConditions = []retentiontypes.CustomRetentionRule{}
}
}
@@ -2040,7 +2041,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
}
// For V1, we don't have TTL conditions
response.TTLConditions = []model.CustomRetentionRule{}
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
}
return response, nil
@@ -2080,7 +2081,7 @@ func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, o
}
// Enhanced validation function with duplicate detection and efficient key validation
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []model.CustomRetentionRule) error {
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []retentiontypes.CustomRetentionRule) error {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.CodeNamespace: "clickhouse-reader",
instrumentationtypes.CodeFunctionName: "validateTTLConditions",
@@ -2230,7 +2231,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending {
if statusItem.Status == types.TTLSettingStatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -2246,7 +2247,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: constants.StatusPending,
Status: types.TTLSettingStatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -2284,7 +2285,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusFailed).
Set("status = ?", types.TTLSettingStatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2305,7 +2306,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusFailed).
Set("status = ?", types.TTLSettingStatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2320,7 +2321,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(types.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", constants.StatusSuccess).
Set("status = ?", types.TTLSettingStatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2388,7 +2389,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
// getTTLQueryStatus fetches ttl_status table status from DB
func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string, tableNameArray []string) (string, *model.ApiError) {
failFlag := false
status := constants.StatusSuccess
status := types.TTLSettingStatusSuccess
for _, tableName := range tableNameArray {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
emptyStatusStruct := new(types.TTLSetting)
@@ -2398,16 +2399,16 @@ func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string,
if apiErr != nil {
return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == constants.StatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
status = constants.StatusPending
if statusItem.Status == types.TTLSettingStatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
status = types.TTLSettingStatusPending
return status, nil
}
if statusItem.Status == constants.StatusFailed {
if statusItem.Status == types.TTLSettingStatusFailed {
failFlag = true
}
}
if failFlag {
status = constants.StatusFailed
status = types.TTLSettingStatusFailed
}
return status, nil

View File

@@ -3,6 +3,7 @@ package model
import (
"time"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -412,21 +413,11 @@ type TTLParams struct {
}
type CustomRetentionTTLParams struct {
Type string `json:"type"`
DefaultTTLDays int `json:"defaultTTLDays"`
TTLConditions []CustomRetentionRule `json:"ttlConditions"`
ColdStorageVolume string `json:"coldStorageVolume,omitempty"`
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"`
}
type CustomRetentionRule struct {
Filters []FilterCondition `json:"conditions"`
TTLDays int `json:"ttlDays"`
}
type FilterCondition struct {
Key string `json:"key"`
Values []string `json:"values"`
Type string `json:"type"`
DefaultTTLDays int `json:"defaultTTLDays"`
TTLConditions []retentiontypes.CustomRetentionRule `json:"ttlConditions"`
ColdStorageVolume string `json:"coldStorageVolume,omitempty"`
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"`
}
type GetCustomRetentionTTLResponse struct {
@@ -440,10 +431,10 @@ type GetCustomRetentionTTLResponse struct {
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
// V2 fields
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
TTLConditions []CustomRetentionRule `json:"ttl_conditions,omitempty"`
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
TTLConditions []retentiontypes.CustomRetentionRule `json:"ttl_conditions,omitempty"`
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
}
type CustomRetentionTTLResponse struct {

View File

@@ -4,6 +4,10 @@ const (
TrueConditionLiteral = "true"
SkipConditionLiteral = "__skip__"
ErrorConditionLiteral = "__skip_because_of_error__"
// BodyFullTextSearchDefaultWarning is emitted when a full-text search, or a search against
// the "body" field, is evaluated with the new JSON body enhancements enabled.
BodyFullTextSearchDefaultWarning = "Full text searches default to `body.message:string`. Use `body.<key>` to search a different field inside body"
)
var (

View File

@@ -362,6 +362,10 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
return ErrorConditionLiteral
}
if v.bodyJSONEnabled && v.fullTextColumn.Name == "body" {
v.warnings = append(v.warnings, BodyFullTextSearchDefaultWarning)
}
return cond
}
@@ -717,6 +721,10 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
return ErrorConditionLiteral
}
if v.bodyJSONEnabled && v.fullTextColumn.Name == "body" {
v.warnings = append(v.warnings, BodyFullTextSearchDefaultWarning)
}
return cond
}

View File

@@ -23,6 +23,7 @@ import (
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
@@ -135,6 +136,9 @@ type Config struct {
// Auditor config
Auditor auditor.Config `mapstructure:"auditor"`
// MeterReporter config
MeterReporter meterreporter.Config `mapstructure:"meterreporter"`
// CloudIntegration config
CloudIntegration cloudintegration.Config `mapstructure:"cloudintegration"`
@@ -175,6 +179,7 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
identn.NewConfigFactory(),
serviceaccount.NewConfigFactory(),
auditor.NewConfigFactory(),
meterreporter.NewConfigFactory(),
cloudintegration.NewConfigFactory(),
tracedetail.NewConfigFactory(),
authz.NewConfigFactory(),

View File

@@ -22,6 +22,8 @@ import (
"github.com/SigNoz/signoz/pkg/modules/fields/implfields"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring/implinframonitoring"
"github.com/SigNoz/signoz/pkg/modules/llmpricingrule"
"github.com/SigNoz/signoz/pkg/modules/llmpricingrule/impllmpricingrule"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/quickfilter"
@@ -77,6 +79,7 @@ type Handlers struct {
AlertmanagerHandler alertmanager.Handler
TraceDetail tracedetail.Handler
RulerHandler ruler.Handler
LLMPricingRuleHandler llmpricingrule.Handler
}
func NewHandlers(
@@ -121,5 +124,6 @@ func NewHandlers(
AlertmanagerHandler: signozalertmanager.NewHandler(alertmanagerService),
TraceDetail: impltracedetail.NewHandler(modules.TraceDetail),
RulerHandler: signozruler.NewHandler(rulerService),
LLMPricingRuleHandler: impllmpricingrule.NewHandler(nil, providerSettings),
}
}

View File

@@ -22,6 +22,7 @@ import (
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/llmpricingrule"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/preference"
@@ -77,6 +78,7 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
struct{ rulestatehistory.Handler }{},
struct{ spanmapper.Handler }{},
struct{ alertmanager.Handler }{},
struct{ llmpricingrule.Handler }{},
struct{ tracedetail.Handler }{},
struct{ ruler.Handler }{},
).New(ctx, instrumentation.ToProviderSettings(), apiserver.Config{})

View File

@@ -28,6 +28,8 @@ import (
"github.com/SigNoz/signoz/pkg/identn/apikeyidentn"
"github.com/SigNoz/signoz/pkg/identn/impersonationidentn"
"github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/meterreporter/noopmeterreporter"
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
@@ -283,6 +285,7 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
handlers.RuleStateHistory,
handlers.SpanMapperHandler,
handlers.AlertmanagerHandler,
handlers.LLMPricingRuleHandler,
handlers.TraceDetail,
handlers.RulerHandler,
),
@@ -317,6 +320,12 @@ func NewAuditorProviderFactories() factory.NamedMap[factory.ProviderFactory[audi
)
}
func NewMeterReporterProviderFactories() factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]] {
return factory.MustNewNamedMap(
noopmeterreporter.NewFactory(),
)
}
func NewFlaggerProviderFactories(registry featuretypes.Registry) factory.NamedMap[factory.ProviderFactory[flagger.FlaggerProvider, flagger.Config]] {
return factory.MustNewNamedMap(
configflagger.NewFactory(registry),

View File

@@ -22,6 +22,7 @@ import (
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
@@ -84,6 +85,7 @@ type SigNoz struct {
Flagger flagger.Flagger
Gateway gateway.Gateway
Auditor auditor.Auditor
MeterReporter meterreporter.Reporter
}
func New(
@@ -104,6 +106,7 @@ func New(
dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing) dashboard.Module,
gatewayProviderFactory func(licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config],
auditorProviderFactories func(licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]],
meterReporterProviderFactories func(licensing.Licensing, telemetrystore.TelemetryStore, sqlstore.SQLStore, organization.Getter, zeus.Zeus) factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]],
querierHandlerCallback func(factory.ProviderSettings, querier.Querier, analytics.Analytics) querier.Handler,
cloudIntegrationCallback func(sqlstore.SQLStore, global.Global, zeus.Zeus, gateway.Gateway, licensing.Licensing, serviceaccount.Module, cloudintegration.Config) (cloudintegration.Module, error),
rulerProviderFactories func(cache.Cache, alertmanager.Alertmanager, sqlstore.SQLStore, telemetrystore.TelemetryStore, telemetrytypes.MetadataStore, prometheus.Prometheus, organization.Getter, rulestatehistory.Module, querier.Querier, queryparser.QueryParser) factory.NamedMap[factory.ProviderFactory[ruler.Ruler, ruler.Config]],
@@ -386,6 +389,12 @@ func New(
return nil, err
}
// Initialize meter reporter from the variant-specific provider factories
meterReporter, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.MeterReporter, meterReporterProviderFactories(licensing, telemetrystore, sqlstore, orgGetter, zeus), config.MeterReporter.Provider)
if err != nil {
return nil, err
}
// Initialize authns
store := sqlauthnstore.NewStore(sqlstore)
authNs, err := authNsCallback(ctx, providerSettings, store, licensing)
@@ -501,6 +510,7 @@ func New(
factory.NewNamedService(factory.MustNewName("authz"), authz),
factory.NewNamedService(factory.MustNewName("user"), userService, factory.MustNewName("authz")),
factory.NewNamedService(factory.MustNewName("auditor"), auditor),
factory.NewNamedService(factory.MustNewName("meterreporter"), meterReporter, factory.MustNewName("licensing")),
factory.NewNamedService(factory.MustNewName("ruler"), rulerInstance),
)
if err != nil {
@@ -550,5 +560,6 @@ func New(
Flagger: flagger,
Gateway: gateway,
Auditor: auditor,
MeterReporter: meterReporter,
}, nil
}

View File

@@ -894,12 +894,12 @@ func TestAdjustKey(t *testing.T) {
func TestStmtBuilderBodyField(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
enableUseJSONBody bool
expected qbtypes.Statement
expectedErr error
expected qbtypes.Statement
expectedErr error
}{
{
name: "body_exists",
@@ -1039,15 +1039,15 @@ func TestStmtBuilderBodyField(t *testing.T) {
func TestStmtBuilderBodyFullTextSearch(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
enableUseJSONBody bool
expected qbtypes.Statement
expectedErr error
expected qbtypes.Statement
expectedErr error
}{
{
name: "body_contains",
name: "fts",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
@@ -1056,13 +1056,30 @@ func TestStmtBuilderBodyFullTextSearch(t *testing.T) {
},
enableUseJSONBody: true,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{querybuilder.BodyFullTextSearchDefaultWarning},
},
expectedErr: nil,
},
{
name: "body_contains_disabled",
name: "fts_2",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "error"},
Limit: 10,
},
enableUseJSONBody: true,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{querybuilder.BodyFullTextSearchDefaultWarning},
},
expectedErr: nil,
},
{
name: "fts_disabled",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,

View File

@@ -413,21 +413,18 @@ func (b *traceOperatorCTEBuilder) buildFinalQuery(ctx context.Context, selectFro
}
func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
keySelectors := b.getKeySelectors()
for _, field := range b.operator.SelectFields {
keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
Name: field.Name,
Signal: telemetrytypes.SignalTraces,
FieldContext: field.FieldContext,
FieldDataType: field.FieldDataType,
})
}
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
sb := sqlbuilder.NewSelectBuilder()
// Select core fields
sb.Select(
"timestamp",
"trace_id",
"span_id",
"name",
"duration_nano",
"parent_span_id",
)
coreFields := []string{"trace_id", "span_id", "name", "duration_nano", "parent_span_id"}
selectedFields := map[string]bool{
"timestamp": true,
"trace_id": true,
@@ -437,15 +434,23 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
"parent_span_id": true,
}
// Inner SELECT reads from the CTE and renames timestamp→ts.
// This breaks the `ORDER BY col AS `col`` pattern that triggers a
// CH 25.12.5 distributed-analyzer regression (NOT_FOUND_COLUMN_IN_BLOCK /
// timestamp renamed to timestamp_0). See ClickHouse/ClickHouse#103508.
innerSB := sqlbuilder.NewSelectBuilder()
innerSB.Select("timestamp AS ts")
innerSB.SelectMore(coreFields...)
// Get keys for selectFields
keySelectors := b.getKeySelectors()
for _, field := range b.operator.SelectFields {
keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
Name: field.Name,
Signal: telemetrytypes.SignalTraces,
FieldContext: field.FieldContext,
FieldDataType: field.FieldDataType,
})
}
var additionalSelectedFields []string
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
// Add selectFields using ColumnExpressionFor since we now have all base table columns
for _, field := range b.operator.SelectFields {
if selectedFields[field.Name] {
continue
@@ -456,60 +461,41 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
slog.String("field", field.Name), errors.Attr(err))
continue
}
innerSB.SelectMore(colExpr)
sb.SelectMore(colExpr)
selectedFields[field.Name] = true
additionalSelectedFields = append(additionalSelectedFields, field.Name)
}
// Also expose any explicit ORDER BY fields that aren't already selected,
// so the outer query can reference them by alias name.
sb.From(selectFromCTE)
// Add order by support using ColumnExpressionFor
orderApplied := false
for _, orderBy := range b.operator.Order {
if selectedFields[orderBy.Key.Name] {
continue
}
colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, b.start, b.end, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil {
return nil, err
}
innerSB.SelectMore(colExpr)
selectedFields[orderBy.Key.Name] = true
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
orderApplied = true
}
innerSB.From(selectFromCTE)
innerSQL, innerArgs := innerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
// Outer SELECT reads from the inner subquery and re-exposes timestamp via
// the ts alias. ORDER BY uses the alias name directly — no AS-alias in the
// ORDER BY position — which is the pattern that avoids the CH regression.
outerSB := sqlbuilder.NewSelectBuilder()
outerSB.Select("ts AS timestamp")
outerSB.SelectMore(coreFields...)
for _, name := range additionalSelectedFields {
outerSB.SelectMore(fmt.Sprintf("`%s`", name))
}
outerSB.From(fmt.Sprintf("(%s) AS t", innerSQL))
if len(b.operator.Order) > 0 {
for _, orderBy := range b.operator.Order {
outerSB.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
}
} else {
outerSB.OrderBy("timestamp DESC")
if !orderApplied {
sb.OrderBy("timestamp DESC")
}
if b.operator.Limit > 0 {
outerSB.Limit(b.operator.Limit)
sb.Limit(b.operator.Limit)
} else {
outerSB.Limit(100)
}
if b.operator.Offset > 0 {
outerSB.Offset(b.operator.Offset)
sb.Limit(100)
}
outerSQL, outerArgs := outerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
if b.operator.Offset > 0 {
sb.Offset(b.operator.Offset)
}
sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{
Query: outerSQL,
Args: append(innerArgs, outerArgs...),
Query: sql,
Args: args,
}, nil
}

View File

@@ -67,7 +67,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id, `service.name` FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name` FROM A_DIR_DESC_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name` FROM A_DIR_DESC_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
@@ -104,7 +104,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), A_INDIR_DESC_B AS (WITH RECURSIVE up AS (SELECT d.trace_id, d.span_id, d.parent_span_id, 0 AS depth FROM B AS d UNION ALL SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1 FROM all_spans AS p JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id WHERE up.depth < 100) SELECT DISTINCT a.* FROM A AS a GLOBAL INNER JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0 ) AS ancestors ON ancestors.trace_id = a.trace_id AND ancestors.span_id = a.span_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_INDIR_DESC_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_INDIR_DESC_B AS (WITH RECURSIVE up AS (SELECT d.trace_id, d.span_id, d.parent_span_id, 0 AS depth FROM B AS d UNION ALL SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1 FROM all_spans AS p JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id WHERE up.depth < 100) SELECT DISTINCT a.* FROM A AS a GLOBAL INNER JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0 ) AS ancestors ON ancestors.trace_id = a.trace_id AND ancestors.span_id = a.span_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_INDIR_DESC_B ORDER BY timestamp DESC LIMIT ? 
SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "gateway", "%service.name%", "%service.name\":\"gateway%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "database", "%service.name%", "%service.name\":\"database%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 5},
},
expectedErr: nil,
@@ -141,7 +141,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_AND_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_AND_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 15},
},
expectedErr: nil,
@@ -178,7 +178,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_OR_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_OR_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 20},
},
expectedErr: nil,
@@ -215,7 +215,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B)) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_not_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B)) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_not_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
@@ -380,72 +380,11 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), __resource_filter_C AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), C AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_C) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), __resource_filter_D AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), D AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_D) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), C_DIR_DESC_D AS (SELECT p.* FROM C AS p INNER JOIN D AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), A_DIR_DESC_B_AND_C_DIR_DESC_D AS (SELECT l.* FROM A_DIR_DESC_B AS l INNER JOIN C_DIR_DESC_D AS r ON l.trace_id = r.trace_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_DIR_DESC_B_AND_C_DIR_DESC_D) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), __resource_filter_C AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), C AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_C) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), __resource_filter_D AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), D AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_D) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), C_DIR_DESC_D AS (SELECT p.* FROM C AS p INNER JOIN D AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), A_DIR_DESC_B_AND_C_DIR_DESC_D AS (SELECT l.* FROM A_DIR_DESC_B AS l INNER JOIN C_DIR_DESC_D AS r ON l.trace_id = r.trace_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_DIR_DESC_B_AND_C_DIR_DESC_D ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "auth", "%service.name%", "%service.name\":\"auth%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "database", "%service.name%", "%service.name\":\"database%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 5},
},
expectedErr: nil,
},
{
// order-by field (http.request.method) is not present in SelectFields;
// it must be included in the inner SELECT so the outer ORDER BY can
// reference it by alias, but must NOT appear in the outer SELECT list.
name: "order by field not in select fields",
requestType: qbtypes.RequestTypeRaw,
operator: qbtypes.QueryBuilderTraceOperator{
Expression: "A => B",
SelectFields: []telemetrytypes.TelemetryFieldKey{
{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Order: []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "http.request.method",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
Limit: 10,
},
compositeQuery: &qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Name: "A",
Signal: telemetrytypes.SignalTraces,
Filter: &qbtypes.Filter{
Expression: "service.name = 'frontend'",
},
},
},
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Name: "B",
Signal: telemetrytypes.SignalTraces,
Filter: &qbtypes.Filter{
Expression: "service.name = 'backend'",
},
},
},
},
},
expected: qbtypes.Statement{
// http.request.method is in the inner SELECT (so ORDER BY can reach it)
// but is absent from the outer SELECT column list — only the ORDER BY clause references it.
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id, `service.name` FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, attributes_string['http.request.method'] AS `http.request.method` FROM A_DIR_DESC_B) AS t ORDER BY `http.request.method` desc LIMIT ? 
SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
},
}
fm := NewFieldMapper()

View File

@@ -0,0 +1,185 @@
package llmpricingruletypes
import (
"database/sql/driver"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
var (
ErrCodePricingRuleNotFound = errors.MustNewCode("pricing_rule_not_found")
ErrCodePricingRuleInvalidInput = errors.MustNewCode("pricing_rule_invalid_input")
)
// LLMPricingRuleUnit is the unit a rule's prices are quoted in.
// It embeds valuer.String so it behaves as a typed string value.
type LLMPricingRuleUnit struct {
	valuer.String
}

var (
	// UnitPerMillionTokens quotes prices per one million tokens.
	UnitPerMillionTokens = LLMPricingRuleUnit{valuer.NewString("per_million_tokens")}
)
// LLMPricingRuleCacheMode describes how a provider reports cached tokens,
// which determines how cache prices combine with the base input price.
// It embeds valuer.String so it behaves as a typed string value.
type LLMPricingRuleCacheMode struct {
	valuer.String
}

var (
	// LLMPricingRuleCacheModeSubtract: cached tokens are inside input_tokens (OpenAI-style).
	LLMPricingRuleCacheModeSubtract = LLMPricingRuleCacheMode{valuer.NewString("subtract")}
	// LLMPricingRuleCacheModeAdditive: cached tokens are reported separately (Anthropic-style).
	LLMPricingRuleCacheModeAdditive = LLMPricingRuleCacheMode{valuer.NewString("additive")}
	// LLMPricingRuleCacheModeUnknown: provider behaviour is unknown; falls back to subtract.
	LLMPricingRuleCacheModeUnknown = LLMPricingRuleCacheMode{valuer.NewString("unknown")}
)
// StringSlice is a []string that is stored as a JSON text column.
// It is compatible with both SQLite and PostgreSQL.
// Serialization to and from the column is handled by its Value and Scan methods.
type StringSlice []string
// LLMRulePricing is the per-rule pricing shape, persisted as a single JSON
// blob (see its Value/Scan methods). Input and Output are the base prices;
// Cache is present only when cached tokens are priced separately.
type LLMRulePricing struct {
	Input  float64               `json:"input" required:"true"`
	Output float64               `json:"output" required:"true"`
	Cache  *LLMPricingCacheCosts `json:"cache,omitempty"`
}
// LLMPricingCacheCosts holds cache-specific prices for a rule.
// Mode selects how cached tokens interact with input tokens (see
// LLMPricingRuleCacheMode); Read and Write are the cache read/write prices.
type LLMPricingCacheCosts struct {
	Mode  LLMPricingRuleCacheMode `json:"mode" required:"true"`
	Read  float64                 `json:"read"`
	Write float64                 `json:"write"`
}
// LLMPricingRule is the pricing-rule row persisted in table llm_pricing_rule.
// Via the Gettable/Storable aliases below it also serves as the API shape.
type LLMPricingRule struct {
	bun.BaseModel `bun:"table:llm_pricing_rule,alias:llm_pricing_rule" json:"-"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable

	// OrgID scopes the rule to a single organization.
	OrgID valuer.UUID `bun:"org_id,type:text,notnull" json:"orgId" required:"true"`
	// SourceID is the upstream identifier for synced rules; nil for user-created rules.
	SourceID *valuer.UUID `bun:"source_id,type:text" json:"sourceId,omitempty"`
	Model    string       `bun:"model,type:text,notnull" json:"modelName" required:"true"`
	Provider string       `bun:"provider,type:text,notnull" json:"provider" required:"true"`
	// ModelPattern is stored as JSON text via StringSlice's Value/Scan.
	ModelPattern StringSlice        `bun:"model_pattern,type:text,notnull" json:"modelPattern" required:"true"`
	Unit         LLMPricingRuleUnit `bun:"unit,type:text,notnull" json:"unit" required:"true"`
	// Pricing is stored as a single JSON blob via LLMRulePricing's Value/Scan.
	Pricing LLMRulePricing `bun:"pricing,type:text,notnull,default:'{}'" json:"pricing" required:"true"`
	// IsOverride marks the row as user-pinned. When true, Zeus skips it entirely.
	IsOverride bool `bun:"is_override,notnull,default:false" json:"isOverride" required:"true"`
	// SyncedAt is the last sync timestamp, if any — presumably stamped by the
	// upsert flow (see UpdatableLLMPricingRule); TODO confirm against the handler.
	SyncedAt *time.Time `bun:"synced_at" json:"syncedAt,omitempty"`
	Enabled  bool       `bun:"enabled,notnull,default:true" json:"enabled" required:"true"`
}
// GettableLLMPricingRule is the API response shape; currently identical to the row.
type GettableLLMPricingRule = LLMPricingRule

// StorableLLMPricingRule is the persistence shape; currently identical to the row.
type StorableLLMPricingRule = LLMPricingRule
// UpdatableLLMPricingRule is one entry in the bulk upsert batch.
//
// Identification:
//   - ID set → match by id (user editing a known row).
//   - SourceID set → match by source_id (Zeus sync, or user editing a Zeus-synced row).
//   - neither set → insert a new row with source_id = NULL (user-created custom rule).
//
// IsOverride is a pointer so the caller can distinguish "not sent" from "set to false".
// When IsOverride is nil AND the matched row has is_override = true, the row is fully
// preserved — only synced_at is stamped.
type UpdatableLLMPricingRule struct {
	// ID of an existing row to update; optional (see identification rules above).
	ID *valuer.UUID `json:"id,omitempty"`
	// SourceID of an existing synced row to update; optional.
	SourceID     *valuer.UUID       `json:"sourceId,omitempty"`
	Model        string             `json:"modelName" required:"true"`
	Provider     string             `json:"provider" required:"true"`
	ModelPattern []string           `json:"modelPattern" required:"true"`
	Unit         LLMPricingRuleUnit `json:"unit" required:"true"`
	Pricing      LLMRulePricing     `json:"pricing" required:"true"`
	// IsOverride pins (true) or unpins (false) the row; nil means "not sent".
	IsOverride *bool `json:"isOverride,omitempty"`
	Enabled    bool  `json:"enabled" required:"true"`
}
// UpdatableLLMPricingRules is the bulk-upsert request body: a batch of rules.
type UpdatableLLMPricingRules struct {
	Rules []UpdatableLLMPricingRule `json:"rules" required:"true"`
}
// ListPricingRulesQuery carries the pagination parameters for listing rules.
type ListPricingRulesQuery struct {
	Offset int `query:"offset" json:"offset"`
	Limit  int `query:"limit" json:"limit"`
}
// GettablePricingRules is one paginated page of pricing rules plus its
// pagination metadata (total row count and the offset/limit that produced it).
type GettablePricingRules struct {
	Items  []*GettableLLMPricingRule `json:"items" required:"true"`
	Total  int                       `json:"total" required:"true"`
	Offset int                       `json:"offset" required:"true"`
	Limit  int                       `json:"limit" required:"true"`
}
// Enum returns the full set of accepted LLMPricingRuleUnit values, letting the
// schema/validation layer enumerate the valid units.
func (LLMPricingRuleUnit) Enum() []any {
	return []any{
		UnitPerMillionTokens,
	}
}
// Enum returns the full set of accepted LLMPricingRuleCacheMode values, letting
// the schema/validation layer enumerate the valid cache modes.
func (LLMPricingRuleCacheMode) Enum() []any {
	return []any{
		LLMPricingRuleCacheModeSubtract,
		LLMPricingRuleCacheModeAdditive,
		LLMPricingRuleCacheModeUnknown,
	}
}
// Value implements driver.Valuer: the slice is stored as a JSON array string,
// with a nil slice normalised to the literal empty array "[]".
func (s StringSlice) Value() (driver.Value, error) {
	if s == nil {
		return "[]", nil
	}
	encoded, err := json.Marshal(s)
	if err != nil {
		return nil, err
	}
	return string(encoded), nil
}
// Scan implements sql.Scanner. It accepts JSON payloads arriving as TEXT or
// BLOB, treats SQL NULL as a nil slice, and rejects every other driver type.
func (s *StringSlice) Scan(src any) error {
	switch payload := src.(type) {
	case nil:
		*s = nil
		return nil
	case string:
		return json.Unmarshal([]byte(payload), s)
	case []byte:
		return json.Unmarshal(payload, s)
	default:
		return errors.NewInternalf(errors.CodeInternal, "llmpricingruletypes: cannot scan %T into StringSlice", src)
	}
}
// Value implements driver.Valuer by serialising the pricing struct into the
// JSON string stored in the TEXT column.
func (p LLMRulePricing) Value() (driver.Value, error) {
	encoded, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}
	return string(encoded), nil
}
// Scan implements sql.Scanner. JSON payloads may arrive as TEXT or BLOB;
// SQL NULL resets the value to the zero LLMRulePricing. Other driver types
// are rejected.
func (p *LLMRulePricing) Scan(src any) error {
	switch payload := src.(type) {
	case nil:
		*p = LLMRulePricing{}
		return nil
	case string:
		return json.Unmarshal([]byte(payload), p)
	case []byte:
		return json.Unmarshal(payload, p)
	default:
		return errors.NewInternalf(errors.CodeInternal, "llmpricingruletypes: cannot scan %T into LLMRulePricing", src)
	}
}
// NewGettableLLMPricingRulesFromLLMPricingRules packages one page of rules
// together with its pagination metadata into the list-response envelope.
func NewGettableLLMPricingRulesFromLLMPricingRules(items []*LLMPricingRule, total, offset, limit int) *GettablePricingRules {
	page := GettablePricingRules{
		Items:  items,
		Total:  total,
		Offset: offset,
		Limit:  limit,
	}
	return &page
}

View File

@@ -0,0 +1,16 @@
package llmpricingruletypes
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
// Store abstracts persistence for LLM pricing rules, scoped to an organization.
type Store interface {
	// List returns one page of rules plus the total row count for the org.
	List(ctx context.Context, orgID valuer.UUID, offset, limit int) ([]*StorableLLMPricingRule, int, error)
	// Get fetches a single rule by its primary id.
	Get(ctx context.Context, orgID, id valuer.UUID) (*StorableLLMPricingRule, error)
	// GetBySourceID fetches a rule by its source_id (set for Zeus-synced rows).
	GetBySourceID(ctx context.Context, orgID, sourceID valuer.UUID) (*StorableLLMPricingRule, error)
	// Create inserts a new rule row.
	Create(ctx context.Context, rule *StorableLLMPricingRule) error
	// Update persists changes to an existing rule row.
	Update(ctx context.Context, rule *StorableLLMPricingRule) error
	// Delete removes the rule with the given id from the org.
	Delete(ctx context.Context, orgID, id valuer.UUID) error
}

View File

@@ -0,0 +1,41 @@
package meterreportertypes
import (
"regexp"
"github.com/SigNoz/signoz/pkg/errors"
)
// nameRegex is the accepted meter-name grammar: a lowercase letter followed by
// one or more lowercase letters, digits, underscores, or dots.
// NOTE(review): the trailing `+` makes the minimum length two characters —
// single-letter names are rejected; confirm that is intended.
var nameRegex = regexp.MustCompile(`^[a-z][a-z0-9_.]+$`)
// Name is a concrete type for a meter name. Dotted namespace identifiers like
// "signoz.meter.log.count" are permitted; arbitrary strings are not, to avoid
// typos silently producing distinct meter rows at Zeus.
type Name struct {
	// s is the validated name; unexported so values can only be built through
	// NewName / MustNewName.
	s string
}
// NewName validates s against the meter-name grammar and wraps it in a Name.
// An invalid-input error is returned when s does not match nameRegex.
func NewName(s string) (Name, error) {
	if nameRegex.MatchString(s) {
		return Name{s: s}, nil
	}
	return Name{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid meter name: %s", s)
}
// MustNewName is the panicking variant of NewName, intended for names that are
// known valid at compile time (package-level declarations and tests).
func MustNewName(s string) Name {
	n, err := NewName(s)
	if err != nil {
		panic(err)
	}
	return n
}
// String returns the underlying meter name.
func (n Name) String() string {
	return n.s
}
// IsZero reports whether n is the zero Name, i.e. it was not produced by
// NewName (which never yields an empty string).
func (n Name) IsZero() bool {
	return n.s == ""
}

View File

@@ -0,0 +1,44 @@
package meterreportertypes
// Reading is a single meter value sent to Zeus. Re-sending the same logical
// reading for a window is expected and will overwrite the prior value instead
// of duplicating usage.
type Reading struct {
	// MeterName is the fully-qualified meter identifier.
	MeterName string `json:"name"`
	// Value is the aggregated scalar for this meter over the reporting window.
	Value float64 `json:"value"`
	// Unit is the metric unit for this reading.
	Unit string `json:"unit"`
	// Aggregation names the aggregation applied to produce Value.
	Aggregation string `json:"aggregation"`
	// StartUnixMilli is the inclusive lower bound of the reporting window in
	// epoch milliseconds (UTC day start for both sealed and partial readings).
	StartUnixMilli int64 `json:"start_unix_milli"`
	// EndUnixMilli is the exclusive upper bound of the reporting window in
	// epoch milliseconds. For a sealed day it is the next day's 00:00 UTC; for
	// the intra-day partial it is the tick's now() — hence each tick's partial
	// carries a fresh EndUnixMilli while the idempotency key keeps it upserted.
	EndUnixMilli int64 `json:"end_unix_milli"`
	// IsCompleted is true only for sealed past buckets. In-progress buckets
	// (e.g. the current UTC day) report IsCompleted=false so Zeus knows the
	// value may still change.
	IsCompleted bool `json:"is_completed"`
	// Dimensions is the per-reading label set (key/value pairs attached to
	// the reading; exact label vocabulary is defined by the producer).
	Dimensions map[string]string `json:"dimensions"`
}
// PostableMeterReadings is the request body for Zeus.PutMeterReadings. One
// request carries every meter reading for a single UTC day. Zeus accepts or
// rejects the batch as a whole — partial acceptance is not supported. The
// idempotency key is carried on the X-Idempotency-Key header, not in the body.
type PostableMeterReadings struct {
	// Meters is the set of meter values being shipped for one day.
	Meters []Reading `json:"meters"`
}

View File

@@ -73,6 +73,12 @@ func NewTraitsFromOrganization(org *Organization) map[string]any {
}
}
// Lifecycle states of a TTL-setting change request: it starts pending and is
// resolved to either failed or success.
const (
	TTLSettingStatusPending = "pending"
	TTLSettingStatusFailed = "failed"
	TTLSettingStatusSuccess = "success"
)
type TTLSetting struct {
bun.BaseModel `bun:"table:ttl_setting"`
Identifiable

View File

@@ -0,0 +1,20 @@
package retentiontypes
// Default retention periods, in days, applied per signal when no custom
// retention rule is configured.
const (
	DefaultLogsRetentionDays = 15
	DefaultMetricsRetentionDays = 30
	DefaultTracesRetentionDays = 15
)
// CustomRetentionRule is one custom retention rule as stored in ttl_setting.condition.
// Rules are evaluated in declaration order; the first matching rule wins.
type CustomRetentionRule struct {
	// Filters holds the conditions that must all apply for the rule to match.
	// NOTE(review): the Go field is named Filters but serialises as
	// "conditions" — confirm the naming mismatch is intentional.
	Filters []FilterCondition `json:"conditions"`
	// TTLDays is the retention period, in days, applied when the rule matches.
	TTLDays int `json:"ttlDays"`
}
// FilterCondition is one label-key, allowed-values condition inside a retention rule.
type FilterCondition struct {
	// Key is the label key the condition inspects.
	Key string `json:"key"`
	// Values lists the label values accepted for Key.
	Values []string `json:"values"`
}

View File

@@ -3,6 +3,7 @@ package zeustypes
import (
"encoding/json"
"net/url"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/tidwall/gjson"
@@ -37,6 +38,11 @@ type Host struct {
URL string `json:"url" required:"true"`
}
// MeterCheckpoint pairs a billing meter name with the checkpoint time Zeus
// reports for it (per the Zeus interface, the latest sealed UTC day stored
// for that meter).
type MeterCheckpoint struct {
	// Name is the billing meter name.
	Name string
	// Checkpoint is the latest sealed UTC day Zeus has stored for Name.
	Checkpoint time.Time
}
func NewGettableHost(data []byte) *GettableHost {
parsed := gjson.ParseBytes(data)
dns := parsed.Get("cluster.region.dns").String()

View File

@@ -49,6 +49,14 @@ func (provider *provider) PutMetersV2(_ context.Context, _ string, _ []byte) err
return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting meters v2 is not supported")
}
// PutMeterReadings always reports the operation as unsupported in this provider.
func (provider *provider) PutMeterReadings(_ context.Context, _ string, _ string, _ []byte) error {
	err := errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting meter readings is not supported")
	return err
}
// GetMeterCheckpoints always reports the operation as unsupported in this provider.
func (provider *provider) GetMeterCheckpoints(_ context.Context, _ string) ([]zeustypes.MeterCheckpoint, error) {
	err := errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "fetching meter checkpoints is not supported")
	return nil, err
}
// PutProfile always reports the operation as unsupported in this provider.
func (provider *provider) PutProfile(_ context.Context, _ string, _ *zeustypes.PostableProfile) error {
	err := errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting profile is not supported")
	return err
}

View File

@@ -35,6 +35,16 @@ type Zeus interface {
// Puts the meters for the given license key using Zeus.
PutMetersV2(context.Context, string, []byte) error
// PutMeterReadings ships one day's batch of meter readings to the v2/meters
// endpoint. idempotencyKey is propagated as X-Idempotency-Key so Zeus can
// UPSERT on retries. The batch is accepted or rejected as a whole.
PutMeterReadings(ctx context.Context, licenseKey string, idempotencyKey string, body []byte) error
// GetMeterCheckpoints returns the latest sealed (is_completed=true) UTC day
// Zeus has stored for each billing meter name. Missing meter names are
// treated by the cron as bootstrap cases.
GetMeterCheckpoints(ctx context.Context, licenseKey string) ([]zeustypes.MeterCheckpoint, error)
// Put profile for the given license key.
PutProfile(context.Context, string, *zeustypes.PostableProfile) error

View File

@@ -72,7 +72,6 @@ class TraceOperatorQuery:
return_spans_from: str
limit: int | None = None
order: list[OrderBy] | None = None
select_fields: list[TelemetryFieldKey] | None = None
def to_dict(self) -> dict:
spec: dict[str, Any] = {
@@ -84,8 +83,6 @@ class TraceOperatorQuery:
spec["limit"] = self.limit
if self.order:
spec["order"] = [o.to_dict() if hasattr(o, "to_dict") else o for o in self.order]
if self.select_fields:
spec["selectFields"] = [f.to_dict() for f in self.select_fields]
return {"type": "builder_trace_operator", "spec": spec}

View File

@@ -1212,13 +1212,21 @@ def test_message_searches(
"aggregation": "count()",
"validate": lambda r: len(get_rows(r)) == 2 and set(_body_messages(r)) == payment_messages,
},
# FTS — bare keyword
# FTS — String bare keyword
{
"name": "msg.fts_quoted",
"requestType": "raw",
"expression": '"Payment"',
"aggregation": "count()",
"validate": lambda r: len(get_rows(r)) == 2 and all("Payment" in b.get("message", "") for b in _get_bodies(r)),
"validate": lambda r: len(get_rows(r)) == 2 and all("Payment" in b.get("message", "") for b in _get_bodies(r)) and r.json().get("data", {}).get("warning") is not None,
},
# FTS — bare keyword
{
"name": "msg.fts_quoted_without_quotes",
"requestType": "raw",
"expression": "Payment",
"aggregation": "count()",
"validate": lambda r: len(get_rows(r)) == 2 and all("Payment" in b.get("message", "") for b in _get_bodies(r)) and r.json().get("data", {}).get("warning") is not None,
},
# = operator via body.message — tests exact match path
{

View File

@@ -530,13 +530,11 @@ def test_export_traces_with_composite_query_trace_operator(
) -> None:
"""
Setup:
Insert a parent span and two child spans, all with an http.method attribute.
Insert multiple traces with parent-child relationships.
Tests:
1. Basic trace operator (A => B) returning parent spans, ordered by timestamp.
2. Same operator with selectFields=[service.name] and order by http.method, which is
NOT in selectFields — verifies the inner/outer subquery fix for the CH 25.12.5
NOT_FOUND_COLUMN_IN_BLOCK regression (ORDER BY col AS `col` in a CTE shape).
1. Export traces using trace operator in composite query (POST)
2. Verify trace operator query works correctly
"""
parent_trace_id = TraceIdGenerator.trace_id()
parent_span_id = TraceIdGenerator.span_id()
@@ -557,8 +555,12 @@ def test_export_traces_with_composite_query_trace_operator(
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={"service.name": "parent-service"},
attributes={"operation.type": "parent", "http.method": "GET"},
resources={
"service.name": "parent-service",
},
attributes={
"operation.type": "parent",
},
),
Traces(
timestamp=now - timedelta(seconds=9),
@@ -570,8 +572,12 @@ def test_export_traces_with_composite_query_trace_operator(
kind=TracesKind.SPAN_KIND_INTERNAL,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={"service.name": "parent-service"},
attributes={"operation.type": "child", "http.method": "POST"},
resources={
"service.name": "parent-service",
},
attributes={
"operation.type": "child",
},
),
Traces(
timestamp=now - timedelta(seconds=7),
@@ -583,23 +589,31 @@ def test_export_traces_with_composite_query_trace_operator(
kind=TracesKind.SPAN_KIND_INTERNAL,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={"service.name": "parent-service"},
attributes={"operation.type": "child", "http.method": "POST"},
resources={
"service.name": "parent-service",
},
attributes={
"operation.type": "child",
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
# A: spans with operation.type = 'parent'
query_a = BuilderQuery(
signal="traces",
name="A",
limit=1000,
filter_expression="operation.type = 'parent'",
)
# B: spans with operation.type = 'child'
query_b = BuilderQuery(
signal="traces",
name="B",
@@ -607,50 +621,47 @@ def test_export_traces_with_composite_query_trace_operator(
filter_expression="operation.type = 'child'",
)
def export(operator: TraceOperatorQuery) -> list[dict]:
body = QueryRangeRequest(
start=start_ns,
end=end_ns,
queries=[query_a, query_b, operator],
).to_dict()
resp = requests.post(
url,
json=body,
timeout=10,
headers={"authorization": f"Bearer {token}", "Content-Type": "application/json"},
)
assert resp.status_code == HTTPStatus.OK, resp.text
assert resp.headers["Content-Type"] == "application/x-ndjson"
return [json.loads(line) for line in resp.text.strip().split("\n") if line]
# Test 1: basic trace operator ordered by timestamp
spans = export(
TraceOperatorQuery(
name="C",
expression="A => B",
return_spans_from="A",
limit=1000,
order=[OrderBy(TelemetryFieldKey("timestamp", "string", "span"), "desc")],
)
# Trace operator: find traces where A has a direct descendant B
query_c = TraceOperatorQuery(
name="C",
expression="A => B",
return_spans_from="A",
limit=1000,
order=[OrderBy(TelemetryFieldKey("timestamp", "string", "span"), "desc")],
)
assert len(spans) == 1
assert all(s.get("trace_id") == parent_trace_id for s in spans)
assert any(s.get("name") == "parent-operation" for s in spans)
# Test 2: order-by field (http.method) absent from selectFields
spans = export(
TraceOperatorQuery(
name="C",
expression="A => B",
return_spans_from="A",
limit=1000,
select_fields=[TelemetryFieldKey("service.name", "string", "resource")],
order=[OrderBy(TelemetryFieldKey("http.method", "string", "tag"), "desc")],
)
body = QueryRangeRequest(
start=start_ns,
end=end_ns,
queries=[query_a, query_b, query_c],
).to_dict()
url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
response = requests.post(
url,
json=body,
timeout=10,
headers={
"authorization": f"Bearer {token}",
"Content-Type": "application/json",
},
)
assert len(spans) >= 1
assert all(s.get("trace_id") == parent_trace_id for s in spans)
assert any(s.get("name") == "parent-operation" for s in spans)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 1, f"Expected at least 1 line, got {len(jsonl_lines)}"
# Verify all returned spans belong to the matched trace
json_objects = [json.loads(line) for line in jsonl_lines]
trace_ids = [obj.get("trace_id") for obj in json_objects]
assert all(tid == parent_trace_id for tid in trace_ids)
# Verify the parent span (returnSpansFrom = "A") is present
span_names = [obj.get("name") for obj in json_objects]
assert "parent-operation" in span_names
def test_export_traces_with_select_fields(