Compare commits

..

12 Commits

Author SHA1 Message Date
Piyush Singariya
357444c94e Merge branch 'main' into traceop 2026-05-11 20:53:51 +05:30
Piyush Singariya
a8598f3bfa fix: alias all core columns 2026-05-11 20:53:09 +05:30
Piyush Singariya
bca71f9a33 chore: remove comments 2026-05-11 16:04:32 +05:30
Piyush Singariya
c93660357d chore: fmt python 2026-05-11 16:02:18 +05:30
Piyush Singariya
5651e3b7a8 Merge branch 'main' into traceop 2026-05-11 14:28:58 +05:30
Piyush Singariya
cf2cfbc7d4 fix: remove specific of timestamp 2026-05-11 14:27:01 +05:30
Piyush Singariya
a969c38224 chore: fmtlint 2026-05-07 13:53:12 +05:30
Piyush Singariya
b892a0f0a5 chore: file rename 2026-05-07 13:51:22 +05:30
Piyush Singariya
4d47762eba chore: separate e2e test file 2026-05-07 13:50:11 +05:30
Piyush Singariya
77396a0bb3 Merge branch 'main' into traceop 2026-05-07 12:56:59 +05:30
Piyush Singariya
28c05e1bab Merge branch 'main' into traceop 2026-05-04 14:27:19 +05:30
Piyush Singariya
2b9e383994 fix: trace raw export e2e 2026-04-30 15:25:43 +05:30
77 changed files with 1349 additions and 2837 deletions

View File

@@ -18,19 +18,16 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -112,9 +109,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
func(_ licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]] {
return signoz.NewAuditorProviderFactories()
},
func(_ context.Context, _ factory.ProviderSettings, _ flagger.Flagger, _ licensing.Licensing, _ telemetrystore.TelemetryStore, _ retention.Getter, _ organization.Getter, _ zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
return signoz.NewMeterReporterProviderFactories(), "noop"
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
return querier.NewHandler(ps, q, a)
},

View File

@@ -1,55 +0,0 @@
package main
import (
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/telemetrytraces"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
)
// meterConfigs declares every meter shipped to Zeus: one static
// "platform active" heartbeat plus one telemetry-backed meter per signal
// (log bytes, span bytes, metric datapoints), each bound to its local
// ClickHouse table and a default retention used when no custom policy applies.
var meterConfigs = []metercollector.Config{
	{
		// Constant 1 per window; marks the deployment as active.
		Provider: metercollector.ProviderStatic,
		Static: metercollector.StaticConfig{
			Name:        zeustypes.MeterPlatformActive,
			Unit:        zeustypes.MeterUnitCount,
			Aggregation: zeustypes.MeterAggregationMax,
			Value:       1,
		},
	},
	{
		// Ingested log volume in bytes, summed over the window.
		Provider: metercollector.ProviderTelemetry,
		Telemetry: metercollector.TelemetryConfig{
			Name:                 zeustypes.MeterLogSize,
			Unit:                 zeustypes.MeterUnitBytes,
			Aggregation:          zeustypes.MeterAggregationSum,
			DBName:               telemetrylogs.DBName,
			TableName:            telemetrylogs.LogsV2LocalTableName,
			DefaultRetentionDays: retentiontypes.DefaultLogsRetentionDays,
		},
	},
	{
		// Ingested span volume in bytes, summed over the window.
		Provider: metercollector.ProviderTelemetry,
		Telemetry: metercollector.TelemetryConfig{
			Name:                 zeustypes.MeterSpanSize,
			Unit:                 zeustypes.MeterUnitBytes,
			Aggregation:          zeustypes.MeterAggregationSum,
			DBName:               telemetrytraces.DBName,
			TableName:            telemetrytraces.SpanIndexV3LocalTableName,
			DefaultRetentionDays: retentiontypes.DefaultTracesRetentionDays,
		},
	},
	{
		// Metric datapoint count, summed over the window.
		Provider: metercollector.ProviderTelemetry,
		Telemetry: metercollector.TelemetryConfig{
			Name:                 zeustypes.MeterDatapointCount,
			Unit:                 zeustypes.MeterUnitCount,
			Aggregation:          zeustypes.MeterAggregationSum,
			DBName:               telemetrymetrics.DBName,
			TableName:            telemetrymetrics.SamplesV4LocalTableName,
			DefaultRetentionDays: retentiontypes.DefaultMetricsRetentionDays,
		},
	},
}

View File

@@ -18,9 +18,6 @@ import (
"github.com/SigNoz/signoz/ee/gateway/httpgateway"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/metercollector/staticmetercollector"
"github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector"
"github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration/implcloudprovider"
"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
@@ -39,17 +36,14 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
pkgflagger "github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
pkgcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -167,20 +161,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
}
return factories
},
func(ctx context.Context, providerSettings factory.ProviderSettings, flagger pkgflagger.Flagger, licensing licensing.Licensing, telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter, orgGetter organization.Getter, zeus zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
factories := signoz.NewMeterReporterProviderFactories()
collectorFactories := factory.MustNewNamedMap(
staticmetercollector.NewFactory(),
telemetrymetercollector.NewFactory(telemetryStore, retentionGetter),
)
if err := factories.Add(httpmeterreporter.NewFactory(collectorFactories, meterConfigs, flagger, licensing, orgGetter, zeus)); err != nil {
panic(err)
}
return factories, "http"
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
communityHandler := querier.NewHandler(ps, q, a)
return eequerier.NewHandler(ps, q, communityHandler)

View File

@@ -429,10 +429,3 @@ authz:
openfga:
# maximum tuples allowed per openfga write operation.
max_tuples_per_write: 300
##################### Meter Reporter #####################
meterreporter:
# The interval between collection ticks. Minimum 5m.
interval: 6h
# Whether to backfill sealed days from the license creation day.
backfill: true

View File

@@ -1,61 +0,0 @@
// Package staticmetercollector emits a fixed-value meter reading per org per window.
package staticmetercollector
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// Compile-time assertion that Provider implements metercollector.MeterCollector.
var _ metercollector.MeterCollector = (*Provider)(nil)

// Provider is the static meter collector: it emits one fixed-value reading
// per organization per window, configured entirely by StaticConfig.
type Provider struct {
	settings factory.ScopedProviderSettings // scoped settings (logger etc.) for this package
	config   metercollector.StaticConfig    // meter identity plus the constant value to emit
}
// NewFactory registers the static collector under the
// metercollector.ProviderStatic provider name.
func NewFactory() factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
	return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderStatic), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
		// Only the Static portion of the shared config is relevant here.
		return newProvider(providerSettings, config.Static), nil
	},
	)
}
// newProvider builds a static Provider with settings scoped to this package's
// import path (used as the logger/telemetry scope name).
func newProvider(providerSettings factory.ProviderSettings, config metercollector.StaticConfig) *Provider {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/staticmetercollector")
	return &Provider{
		settings: settings,
		config:   config,
	}
}
// Name, Unit and Aggregation expose the configured meter identity verbatim.
func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
	return provider.config.Aggregation
}

// Origin returns the earliest day this meter can report for: UTC midnight of
// the license's creation day. A zero time (and nil error) is returned when no
// license, or no creation timestamp, is available.
func (provider *Provider) Origin(_ context.Context, _ valuer.UUID, license *licensetypes.License, _ time.Time) (time.Time, error) {
	if license == nil || license.CreatedAt.IsZero() {
		return time.Time{}, nil
	}
	createdAt := license.CreatedAt.UTC()
	// Truncate to the start of the day in UTC.
	return time.Date(createdAt.Year(), createdAt.Month(), createdAt.Day(), 0, 0, 0, 0, time.UTC), nil
}
// Collect emits a single meter with the configured constant value, tagged with
// the organization ID. It emits nothing (nil, nil) when there is no usable
// license key, so inactive deployments produce no readings.
func (provider *Provider) Collect(_ context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error) {
	if license == nil || license.Key == "" {
		return nil, nil
	}
	return []zeustypes.Meter{
		zeustypes.NewMeter(provider.config.Name, provider.config.Value, provider.config.Unit, provider.config.Aggregation, window, zeustypes.NewDimensions(zeustypes.OrganizationID.String(orgID.StringValue()))),
	}, nil
}

View File

@@ -1,247 +0,0 @@
// Package telemetrymetercollector collects telemetry meters (logs, traces, metrics)
// by retention. One Provider materializes per TelemetryConfig.
package telemetrymetercollector
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/huandu/go-sqlbuilder"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/telemetrymeter"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
	// Allow-list patterns for retention-rule filter keys and values. Anything
	// outside these character sets is rejected before being interpolated into
	// a SQL string (see buildRuleConditionSQL), which is the injection guard.
	labelKeyPattern   = regexp.MustCompile(`^[A-Za-z0-9_.\-]+$`)
	labelValuePattern = regexp.MustCompile(`^[A-Za-z0-9_.\-:]+$`)
)

// Compile-time assertion that Provider implements metercollector.MeterCollector.
var _ metercollector.MeterCollector = (*Provider)(nil)

// Provider collects one telemetry meter (logs, traces or metrics) from the
// meter samples table, split by effective retention days.
type Provider struct {
	settings        factory.ScopedProviderSettings // scoped settings for this package
	config          metercollector.TelemetryConfig // meter identity plus source table/retention info
	telemetryStore  telemetrystore.TelemetryStore  // ClickHouse access
	retentionGetter retention.Getter               // resolves retention policy segments per org/table
}
// NewFactory registers the telemetry collector under the
// metercollector.ProviderTelemetry provider name. One Provider is
// materialized per TelemetryConfig entry.
func NewFactory(telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter) factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
	return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderTelemetry), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
		// Only the Telemetry portion of the shared config is relevant here.
		return newProvider(providerSettings, config.Telemetry, telemetryStore, retentionGetter), nil
	},
	)
}

// newProvider builds a telemetry Provider with settings scoped to this
// package's import path.
func newProvider(
	providerSettings factory.ProviderSettings,
	config metercollector.TelemetryConfig,
	telemetryStore telemetrystore.TelemetryStore,
	retentionGetter retention.Getter,
) *Provider {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector")
	return &Provider{
		settings:        settings,
		config:          config,
		telemetryStore:  telemetryStore,
		retentionGetter: retentionGetter,
	}
}
// Name, Unit and Aggregation expose the configured meter identity verbatim.
func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
	return provider.config.Aggregation
}

// Origin returns UTC midnight of the earliest sample recorded for this meter
// in the meter samples table. When no samples exist (min is 0), it falls back
// to todayStart so no backfill is attempted.
func (provider *Provider) Origin(ctx context.Context, _ valuer.UUID, _ *licensetypes.License, todayStart time.Time) (time.Time, error) {
	query, args := buildOriginQuery(provider.config.Name.String())
	var minMs int64
	if err := provider.telemetryStore.ClickhouseDB().QueryRow(ctx, query, args...).Scan(&minMs); err != nil {
		return time.Time{}, err
	}
	if minMs == 0 {
		return todayStart, nil
	}
	minDay := time.UnixMilli(minMs).UTC()
	return time.Date(minDay.Year(), minDay.Month(), minDay.Day(), 0, 0, 0, 0, time.UTC), nil
}
// Collect sums this meter's samples over the window, grouped by the effective
// retention (in days) at sample time. The window is first split into retention
// policy segments (periods during which one policy was in force); each segment
// is queried separately and the per-retention sums are accumulated across
// segments. Returns one Meter per distinct retention value.
func (provider *Provider) Collect(
	ctx context.Context,
	orgID valuer.UUID,
	_ *licensetypes.License,
	window zeustypes.MeterWindow,
) ([]zeustypes.Meter, error) {
	meterName := provider.config.Name.String()
	segments, err := provider.retentionGetter.GetRetentionPolicySegments(
		ctx,
		orgID,
		provider.config.DBName,
		provider.config.TableName,
		provider.config.DefaultRetentionDays,
		window.StartUnixMilli,
		window.EndUnixMilli,
	)
	if err != nil {
		return nil, err
	}
	valuesByRetentionDays := make(map[int]int64)
	for _, segment := range segments {
		query, args, err := buildQuery(meterName, segment)
		if err != nil {
			return nil, err
		}
		rows, err := provider.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
		if err != nil {
			return nil, err
		}
		// The closure scopes the defer so rows are closed at the end of each
		// segment iteration, not at function return.
		if err := func() error {
			defer rows.Close()
			for rows.Next() {
				var retentionDays int32
				var value int64
				if err := rows.Scan(&retentionDays, &value); err != nil {
					return err
				}
				// Accumulate across segments: the same retention value can
				// appear in more than one segment.
				valuesByRetentionDays[int(retentionDays)] += value
			}
			if err := rows.Err(); err != nil {
				return err
			}
			return nil
		}(); err != nil {
			return nil, err
		}
	}
	meters := make([]zeustypes.Meter, 0, len(valuesByRetentionDays))
	for retentionDays, value := range valuesByRetentionDays {
		meters = append(meters, zeustypes.NewMeter(provider.config.Name, value, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, retentionDays)))
	}
	// Empty windows still emit a sentinel so checkpoints can advance.
	if len(meters) == 0 && len(segments) > 0 {
		meters = append(meters, zeustypes.NewMeter(provider.config.Name, 0, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, segments[len(segments)-1].DefaultDays)))
	}
	return meters, nil
}
// buildOriginQuery builds the SQL (and its bound args) that finds the earliest
// unix-millisecond timestamp recorded for the given meter; ifNull maps an
// empty result set to 0.
func buildOriginQuery(meterName string) (string, []any) {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("toInt64(ifNull(min(unix_milli), 0))")
	sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
	sb.Where(sb.Equal("metric_name", meterName))
	return sb.BuildWithFlavor(sqlbuilder.ClickHouse)
}
// buildQuery builds the per-segment SQL that sums this meter's samples grouped
// by the retention (in days) each sample falls under. The retention value is
// computed inline via a multiIf expression derived from the segment's custom
// retention rules; sample values are summed per retention bucket.
func buildQuery(meterName string, segment *retentiontypes.RetentionPolicySegment) (string, []any, error) {
	retentionExpr, err := buildRetentionMultiIfSQL(segment.Rules, segment.DefaultDays)
	if err != nil {
		return "", nil, err
	}
	selects := []string{
		retentionExpr + " AS retention_days",
		"toInt64(ifNull(sum(value), 0)) AS value",
	}
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select(selects...)
	sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
	// Half-open interval [StartMs, EndMs) so adjacent segments never overlap.
	sb.Where(
		sb.Equal("metric_name", meterName),
		sb.GTE("unix_milli", segment.StartMs),
		sb.LT("unix_milli", segment.EndMs),
	)
	sb.GroupBy("retention_days")
	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	return query, args, nil
}
// buildRetentionMultiIfSQL renders the custom retention rules as a ClickHouse
// multiIf expression yielding the TTL days for each row: alternating
// (condition, ttl_days) arms followed by the default. With no rules, it
// degenerates to the constant default. Errors on non-positive retention
// values so invalid configs fail loudly rather than mis-bucket data.
func buildRetentionMultiIfSQL(rules []retentiontypes.CustomRetentionRule, defaultDays int) (string, error) {
	if defaultDays <= 0 {
		return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "non-positive default retention %d", defaultDays)
	}
	if len(rules) == 0 {
		return "toInt32(" + strconv.Itoa(defaultDays) + ")", nil
	}
	// multiIf takes pairs of (condition, value) plus one trailing default.
	arms := make([]string, 0, 2*len(rules)+1)
	for ruleIndex, rule := range rules {
		if rule.TTLDays <= 0 {
			return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has non-positive ttl_days %d", ruleIndex, rule.TTLDays)
		}
		conditionExpr, err := buildRuleConditionSQL(ruleIndex, rule)
		if err != nil {
			return "", err
		}
		arms = append(arms, conditionExpr)
		arms = append(arms, strconv.Itoa(rule.TTLDays))
	}
	arms = append(arms, strconv.Itoa(defaultDays))
	return "toInt32(multiIf(" + strings.Join(arms, ", ") + "))", nil
}
// buildRuleConditionSQL renders one retention rule's filters as an AND-joined
// SQL condition over the samples' labels JSON. Keys and values are validated
// against strict allow-list regexes (labelKeyPattern / labelValuePattern)
// before interpolation — that validation is what makes the string-built SQL
// safe, since neither pattern admits quotes or other SQL metacharacters.
func buildRuleConditionSQL(ruleIndex int, rule retentiontypes.CustomRetentionRule) (string, error) {
	if len(rule.Filters) == 0 {
		return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has no filters", ruleIndex)
	}
	filterExprs := make([]string, 0, len(rule.Filters))
	for filterIndex, filter := range rule.Filters {
		if !labelKeyPattern.MatchString(filter.Key) {
			return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has invalid key %q", ruleIndex, filterIndex, filter.Key)
		}
		if len(filter.Values) == 0 {
			return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has no values", ruleIndex, filterIndex)
		}
		quoted := make([]string, len(filter.Values))
		for valueIndex, value := range filter.Values {
			if !labelValuePattern.MatchString(value) {
				return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d value %d is invalid %q", ruleIndex, filterIndex, valueIndex, value)
			}
			quoted[valueIndex] = "'" + value + "'"
		}
		filterExprs = append(filterExprs, fmt.Sprintf("JSONExtractString(labels, '%s') IN (%s)", filter.Key, strings.Join(quoted, ", ")))
	}
	return strings.Join(filterExprs, " AND "), nil
}
// buildDimensions tags a meter reading with the owning organization and its
// retention period, converted from days to seconds (the unit Zeus expects).
func buildDimensions(orgID valuer.UUID, retentionDays int) map[string]string {
	const secondsPerDay = int64(24 * 60 * 60)
	retentionSeconds := int64(retentionDays) * secondsPerDay
	return zeustypes.NewDimensions(
		zeustypes.OrganizationID.String(orgID.StringValue()),
		zeustypes.RetentionDuration.String(strconv.FormatInt(retentionSeconds, 10)),
	)
}

View File

@@ -1,318 +0,0 @@
package httpmeterreporter
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
// Compile-time assertion that Provider implements factory.ServiceWithHealthy.
var _ factory.ServiceWithHealthy = (*Provider)(nil)

// Provider is the HTTP meter reporter: on a fixed interval it runs every
// configured collector per organization and ships the readings to Zeus,
// optionally backfilling sealed days since the license creation day.
type Provider struct {
	settings         factory.ScopedProviderSettings
	config           meterreporter.Config
	collectorsByName map[zeustypes.MeterName]metercollector.MeterCollector // one collector per meter name
	flagger          flagger.Flagger                                       // per-org feature gate
	licensing        licensing.Licensing                                   // active-license lookup per org
	orgGetter        organization.Getter
	zeus             zeus.Zeus // checkpoint listing and meter upload
	healthyC         chan struct{} // closed once Start runs; signals health
	stopC            chan struct{} // closed by Stop to end the Start loop
	metrics          *reporterMetrics
}
// NewFactory registers the HTTP meter reporter under the "http" provider name,
// binding the collector factories/configs and the org, license, flag and Zeus
// dependencies it needs at collection time.
func NewFactory(collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]], collectorConfigs []metercollector.Config, flagger flagger.Flagger, licensing licensing.Licensing, orgGetter organization.Getter, zeus zeus.Zeus) factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
	return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config meterreporter.Config) (meterreporter.Reporter, error) {
		return newProvider(ctx, providerSettings, config, collectorFactories, collectorConfigs, flagger, licensing, orgGetter, zeus)
	},
	)
}

// newProvider materializes one collector per config entry, indexes them by
// meter name (rejecting duplicates), sets up the reporter's own metrics, and
// assembles the Provider. Errors if any collector fails to build.
func newProvider(
	ctx context.Context,
	providerSettings factory.ProviderSettings,
	config meterreporter.Config,
	collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]],
	collectorConfigs []metercollector.Config,
	flagger flagger.Flagger,
	licensing licensing.Licensing,
	orgGetter organization.Getter,
	zeus zeus.Zeus,
) (*Provider, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter")
	collectorsByName := map[zeustypes.MeterName]metercollector.MeterCollector{}
	for _, collectorConfig := range collectorConfigs {
		collector, err := factory.NewProviderFromNamedMap(ctx, providerSettings, collectorConfig, collectorFactories, collectorConfig.Provider)
		if err != nil {
			return nil, err
		}
		// Two configs resolving to the same meter name would silently shadow
		// each other; fail construction instead.
		if _, exists := collectorsByName[collector.Name()]; exists {
			return nil, errors.Newf(errors.TypeAlreadyExists, errors.CodeAlreadyExists, "duplicate meter collector %q", collector.Name())
		}
		collectorsByName[collector.Name()] = collector
	}
	metrics, err := newReporterMetrics(settings.Meter())
	if err != nil {
		return nil, err
	}
	return &Provider{
		settings:         settings,
		config:           config,
		collectorsByName: collectorsByName,
		flagger:          flagger,
		licensing:        licensing,
		orgGetter:        orgGetter,
		zeus:             zeus,
		healthyC:         make(chan struct{}),
		stopC:            make(chan struct{}),
		metrics:          metrics,
	}, nil
}
// Start marks the provider healthy, runs one immediate collection pass, then
// collects on every config.Interval tick until Stop closes stopC. Blocks for
// the lifetime of the service.
func (provider *Provider) Start(ctx context.Context) error {
	// Healthy as soon as the loop is entered; collection failures are
	// reported via metrics/logs, not health.
	close(provider.healthyC)
	provider.collect(ctx)
	ticker := time.NewTicker(provider.config.Interval)
	defer ticker.Stop()
	for {
		select {
		case <-provider.stopC:
			return nil
		case <-ticker.C:
			provider.collect(ctx)
		}
	}
}
// collect runs one reporting pass across all owned organizations under a
// trace span. Orgs with the meter-reporter feature flag off, or without an
// active license, are skipped. A per-org collection failure is logged but
// does not stop the remaining orgs; an org-listing or unexpected licensing
// error aborts the whole pass.
func (provider *Provider) collect(ctx context.Context) {
	ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.Collect", trace.WithAttributes(attribute.String("meterreporter.provider", "http")))
	defer span.End()
	orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
	if err != nil {
		span.RecordError(err)
		provider.settings.Logger().ErrorContext(ctx, "failed to get orgs data", errors.Attr(err))
		return
	}
	for _, org := range orgs {
		evalCtx := featuretypes.NewFlaggerEvaluationContext(org.ID)
		if !provider.flagger.BooleanOrEmpty(ctx, flagger.FeatureUseMeterReporter, evalCtx) {
			provider.settings.Logger().DebugContext(ctx, "meter reporter disabled for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
			continue
		}
		license, err := provider.licensing.GetActive(ctx, org.ID)
		if err != nil {
			if errors.Ast(err, errors.TypeNotFound) {
				provider.settings.Logger().DebugContext(ctx, "no active license found for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
				continue
			}
			span.RecordError(err)
			// NOTE(review): a non-NotFound licensing error returns here,
			// aborting the remaining orgs in this pass — confirm intentional.
			provider.settings.Logger().ErrorContext(ctx, "failed to fetch active license for org", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
			return
		}
		if err := provider.collectOrg(ctx, org, license); err != nil {
			span.RecordError(err)
			provider.settings.Logger().ErrorContext(ctx, "failed to collect meters", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
		}
	}
}
// Stop signals the Start loop to exit by closing stopC.
// NOTE(review): close is not idempotent — a second Stop call would panic;
// confirm the service runner guarantees Stop runs at most once.
func (provider *Provider) Stop(ctx context.Context) error {
	close(provider.stopC)
	return nil
}

// Healthy returns a channel that is closed once Start has begun running.
func (provider *Provider) Healthy() <-chan struct{} {
	return provider.healthyC
}
// collectOrg reports meters for a single organization. When backfill is
// enabled it first replays every sealed day each collector still owes
// (bounded by Zeus checkpoints and the license creation day), one full-day
// window per day; it then reports today's partial window with all collectors.
// Returns the first error encountered.
func (provider *Provider) collectOrg(ctx context.Context, org *types.Organization, license *licensetypes.License) error {
	now := time.Now().UTC()
	// Use one timestamp so a tick cannot straddle midnight.
	todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)
	if provider.config.Backfill {
		checkpointsByMeter, err := provider.checkpoints(ctx, license.Key)
		if err != nil {
			return err
		}
		nextByCollector := provider.nextDays(license, todayStart, checkpointsByMeter)
		start, end, ok := backfillRange(nextByCollector, todayStart)
		if ok {
			for day := start; !day.After(end); day = day.AddDate(0, 0, 1) {
				// Only collectors whose next reportable day has arrived take
				// part in this day's window.
				eligible := eligibleCollectors(provider.collectorsByName, nextByCollector, day)
				if len(eligible) == 0 {
					continue
				}
				// Sealed full-day window [day, day+1).
				window, err := zeustypes.NewMeterWindow(day.UnixMilli(), day.AddDate(0, 0, 1).UnixMilli(), true)
				if err != nil {
					return err
				}
				if err := provider.report(ctx, org.ID, license, window, eligible); err != nil {
					provider.settings.Logger().WarnContext(ctx, "failed to backfill for day", errors.Attr(err), slog.String("date", day.Format("2006-01-02")))
					return err
				}
			}
		}
	}
	// Today's partial window: every collector is always eligible (next <= today).
	if now.UnixMilli() > todayStart.UnixMilli() {
		todayWindow, err := zeustypes.NewMeterWindow(todayStart.UnixMilli(), now.UnixMilli(), false)
		if err != nil {
			return err
		}
		return provider.report(ctx, org.ID, license, todayWindow, provider.collectorsByName)
	}
	return nil
}
// checkpoints fetches the per-meter reporting checkpoints from Zeus for the
// given license key and indexes them by meter name. The fetch is counted in
// the checkpoints metric, tagged with the error type on failure.
func (provider *Provider) checkpoints(ctx context.Context, licenseKey string) (map[string]time.Time, error) {
	list, err := provider.zeus.ListMeterCheckpoints(ctx, licenseKey)
	if err != nil {
		provider.metrics.checkpoints.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
		return nil, err
	}
	provider.metrics.checkpoints.Add(ctx, 1)
	checkpointsByMeter := make(map[string]time.Time, len(list))
	for _, checkpoint := range list {
		checkpointsByMeter[checkpoint.Name] = checkpoint.StartDate.UTC()
	}
	return checkpointsByMeter, nil
}

// nextDays computes, for every collector, the next day it still needs to
// report: the later of the license creation day and the day after its last
// checkpoint (see nextReportableDay).
func (provider *Provider) nextDays(license *licensetypes.License, todayStart time.Time, checkpointsByMeter map[string]time.Time) map[zeustypes.MeterName]time.Time {
	nextByCollector := make(map[zeustypes.MeterName]time.Time, len(provider.collectorsByName))
	licenseCreatedAt := license.CreatedAt.UTC()
	licenseCreatedAtDay := time.Date(licenseCreatedAt.Year(), licenseCreatedAt.Month(), licenseCreatedAt.Day(), 0, 0, 0, 0, time.UTC)
	for _, collector := range provider.collectorsByName {
		checkpoint, hasCheckpoint := checkpointsByMeter[collector.Name().String()]
		nextByCollector[collector.Name()] = nextReportableDay(licenseCreatedAtDay, todayStart, checkpoint, hasCheckpoint)
	}
	return nextByCollector
}
func nextReportableDay(licenseCreatedAtDay time.Time, todayStart time.Time, checkpoint time.Time, hasCheckpoint bool) time.Time {
next := licenseCreatedAtDay
if next.IsZero() {
next = todayStart
}
if hasCheckpoint {
checkpointNext := checkpoint.AddDate(0, 0, 1)
if checkpointNext.After(next) {
next = checkpointNext
}
}
return next
}
// report runs the given collectors over one window, aggregates their readings,
// and uploads the batch to Zeus keyed by the window's start date. A single
// collector failure is counted and skipped, not fatal; serialization or upload
// failure is returned. No-op when no collector produced a reading.
// NOTE(review): the idempotency key carries only the date, not the org or
// meter set — confirm Zeus scopes idempotency per license key, otherwise
// multiple orgs on one tick would collide.
func (provider *Provider) report(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow, collectors map[zeustypes.MeterName]metercollector.MeterCollector) error {
	date := time.UnixMilli(window.StartUnixMilli).UTC().Format("2006-01-02")
	meters := make([]zeustypes.Meter, 0, len(collectors))
	for _, collector := range collectors {
		meterAttr := attribute.String("signoz.meter.name", collector.Name().String())
		collectedReadings, err := collector.Collect(ctx, orgID, license, window)
		if err != nil {
			// Count the failure and keep going with the remaining collectors.
			provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr, errors.TypeAttr(err)))
			continue
		}
		provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr))
		meters = append(meters, collectedReadings...)
	}
	if len(meters) == 0 {
		return nil
	}
	idempotencyKey := fmt.Sprintf("meterreporter:%s", date)
	body, err := json.Marshal(meters)
	if err != nil {
		provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
		return err
	}
	if err := provider.zeus.PutMetersV3(ctx, license.Key, idempotencyKey, body); err != nil {
		provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
		return err
	}
	provider.metrics.reports.Add(ctx, 1)
	provider.metrics.meters.Add(ctx, int64(len(meters)))
	return nil
}
// backfillRange computes the inclusive span of sealed days still owed,
// ending at yesterday. Only collectors whose next reportable day lies
// strictly before todayStart contribute; the span begins at the earliest
// such day. ok is false when nothing needs backfilling.
func backfillRange(nextByCollector map[zeustypes.MeterName]time.Time, todayStart time.Time) (start, end time.Time, ok bool) {
	var earliest time.Time
	for _, next := range nextByCollector {
		if next.Before(todayStart) && (earliest.IsZero() || next.Before(earliest)) {
			earliest = next
		}
	}
	yesterday := todayStart.AddDate(0, 0, -1)
	if earliest.IsZero() || earliest.After(yesterday) {
		return time.Time{}, time.Time{}, false
	}
	return earliest, yesterday, true
}
// eligibleCollectors keeps only the collectors whose next reportable day is
// on or before the given day (a missing entry is the zero time, which is
// never after day, so such collectors are always eligible).
func eligibleCollectors(collectors map[zeustypes.MeterName]metercollector.MeterCollector, nextByCollector map[zeustypes.MeterName]time.Time, day time.Time) map[zeustypes.MeterName]metercollector.MeterCollector {
	result := make(map[zeustypes.MeterName]metercollector.MeterCollector, len(collectors))
	for name, collector := range collectors {
		next := nextByCollector[name]
		if next.After(day) {
			continue
		}
		result[name] = collector
	}
	return result
}

View File

@@ -1,48 +0,0 @@
package httpmeterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"go.opentelemetry.io/otel/metric"
)
// reporterMetrics holds the reporter's self-observability counters.
type reporterMetrics struct {
	checkpoints metric.Int64Counter // checkpoint fetches from Zeus
	reports     metric.Int64Counter // batched meter uploads to Zeus
	collections metric.Int64Counter // individual collector.Collect calls
	meters      metric.Int64Counter // meter readings shipped
}

// newReporterMetrics creates all reporter counters on the given meter.
// Instrument-creation errors are joined and returned together so every
// failure is surfaced at once rather than the first only.
func newReporterMetrics(meter metric.Meter) (*reporterMetrics, error) {
	var errs error
	checkpoints, err := meter.Int64Counter("signoz.meterreporter.checkpoints", metric.WithDescription("Zeus meter checkpoint fetches."), metric.WithUnit("{checkpoint}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}
	reports, err := meter.Int64Counter("signoz.meterreporter.reports", metric.WithDescription("Meter reports shipped to Zeus."), metric.WithUnit("{report}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}
	collections, err := meter.Int64Counter("signoz.meterreporter.collections", metric.WithDescription("Per-meter collect calls."), metric.WithUnit("{collection}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}
	meters, err := meter.Int64Counter("signoz.meterreporter.meters", metric.WithDescription("Meter readings shipped to Zeus."), metric.WithUnit("{meter}"))
	if err != nil {
		errs = errors.Join(errs, err)
	}
	if errs != nil {
		return nil, errs
	}
	return &reporterMetrics{
		checkpoints: checkpoints,
		reports:     reports,
		collections: collections,
		meters:      meters,
	}, nil
}

View File

@@ -150,72 +150,6 @@ func (provider *Provider) PutMetersV2(ctx context.Context, key string, data []by
return err
}
// PutMetersV3 uploads a meter batch to the Zeus /v2/meters endpoint,
// attaching the caller-supplied idempotency key (when non-empty) as the
// X-Idempotency-Key header so retried uploads can be deduplicated server-side.
func (provider *Provider) PutMetersV3(ctx context.Context, key string, idempotencyKey string, data []byte) error {
	headers := http.Header{}
	if idempotencyKey != "" {
		headers.Set("X-Idempotency-Key", idempotencyKey)
	}
	_, err := provider.doWithHeaders(
		ctx,
		provider.config.URL.JoinPath("/v2/meters"),
		http.MethodPost,
		key,
		data,
		headers,
	)
	return err
}
// ListMeterCheckpoints fetches per-meter reporting checkpoints from Zeus and
// parses the response's "data" array. Each entry must carry a non-empty name
// and a "2006-01-02"-formatted start_date; any structural violation is
// reported as a malformed-response internal error.
func (provider *Provider) ListMeterCheckpoints(ctx context.Context, key string) ([]zeustypes.MeterCheckpoint, error) {
	response, err := provider.do(
		ctx,
		provider.config.URL.JoinPath("/v2/meters/checkpoints"),
		http.MethodGet,
		key,
		nil,
	)
	if err != nil {
		return nil, err
	}
	checkpointValues := gjson.GetBytes(response, "data")
	if !checkpointValues.Exists() || checkpointValues.Type == gjson.Null {
		return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints are required")
	}
	if !checkpointValues.IsArray() {
		return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints must be an array")
	}
	checkpointResults := checkpointValues.Array()
	checkpoints := make([]zeustypes.MeterCheckpoint, 0, len(checkpointResults))
	for _, checkpointValue := range checkpointResults {
		name := checkpointValue.Get("name").String()
		if name == "" {
			return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint name is required")
		}
		startDateString := checkpointValue.Get("start_date").String()
		if startDateString == "" {
			return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint start_date is required for %q", name)
		}
		startDate, err := time.Parse("2006-01-02", startDateString)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, zeus.ErrCodeResponseMalformed, "parse meter checkpoint start_date %q for %q", startDateString, name)
		}
		checkpoints = append(checkpoints, zeustypes.MeterCheckpoint{
			Name:      name,
			StartDate: startDate,
		})
	}
	return checkpoints, nil
}
func (provider *Provider) PutProfile(ctx context.Context, key string, profile *zeustypes.PostableProfile) error {
body, err := json.Marshal(profile)
if err != nil {
@@ -251,21 +185,12 @@ func (provider *Provider) PutHost(ctx context.Context, key string, host *zeustyp
}
func (provider *Provider) do(ctx context.Context, url *url.URL, method string, key string, requestBody []byte) ([]byte, error) {
return provider.doWithHeaders(ctx, url, method, key, requestBody, nil)
}
func (provider *Provider) doWithHeaders(ctx context.Context, url *url.URL, method string, key string, requestBody []byte, extraHeaders http.Header) ([]byte, error) {
request, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(requestBody))
if err != nil {
return nil, err
}
request.Header.Set("X-Signoz-Cloud-Api-Key", key)
request.Header.Set("Content-Type", "application/json")
for k, vs := range extraHeaders {
for _, v := range vs {
request.Header.Add(k, v)
}
}
response, err := provider.httpClient.Do(request)
if err != nil {

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

View File

@@ -1,4 +1,4 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
// noinspection ES6UnusedImports,JSUnusedGlobalSymbols,JSUnusedLocalSymbols
import {
ATN,
@@ -38,13 +38,12 @@ export default class FilterQueryLexer extends Lexer {
public static readonly HAS = 24;
public static readonly HASANY = 25;
public static readonly HASALL = 26;
public static readonly SEARCH = 27;
public static readonly BOOL = 28;
public static readonly NUMBER = 29;
public static readonly QUOTED_TEXT = 30;
public static readonly KEY = 31;
public static readonly WS = 32;
public static readonly FREETEXT = 33;
public static readonly BOOL = 27;
public static readonly NUMBER = 28;
public static readonly QUOTED_TEXT = 29;
public static readonly KEY = 30;
public static readonly WS = 31;
public static readonly FREETEXT = 32;
public static readonly EOF = Token.EOF;
public static readonly channelNames: string[] = [ "DEFAULT_TOKEN_CHANNEL", "HIDDEN" ];
@@ -69,9 +68,8 @@ export default class FilterQueryLexer extends Lexer {
"AND", "OR",
"HASTOKEN",
"HAS", "HASANY",
"HASALL", "SEARCH",
"BOOL", "NUMBER",
"QUOTED_TEXT",
"HASALL", "BOOL",
"NUMBER", "QUOTED_TEXT",
"KEY", "WS",
"FREETEXT" ];
public static readonly modeNames: string[] = [ "DEFAULT_MODE", ];
@@ -80,8 +78,8 @@ export default class FilterQueryLexer extends Lexer {
"LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS", "REGEXP",
"CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY", "HASALL",
"SEARCH", "BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT", "EMPTY_BRACKS",
"OLD_JSON_BRACKS", "KEY", "WS", "DIGIT", "FREETEXT",
"BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT", "EMPTY_BRACKS", "OLD_JSON_BRACKS",
"KEY", "WS", "DIGIT", "FREETEXT",
];
@@ -102,122 +100,119 @@ export default class FilterQueryLexer extends Lexer {
public get modeNames(): string[] { return FilterQueryLexer.modeNames; }
public static readonly _serializedATN: number[] = [4,0,33,329,6,-1,2,0,
public static readonly _serializedATN: number[] = [4,0,32,320,6,-1,2,0,
7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,6,2,7,7,7,2,8,7,8,2,9,
7,9,2,10,7,10,2,11,7,11,2,12,7,12,2,13,7,13,2,14,7,14,2,15,7,15,2,16,7,
16,2,17,7,17,2,18,7,18,2,19,7,19,2,20,7,20,2,21,7,21,2,22,7,22,2,23,7,23,
2,24,7,24,2,25,7,25,2,26,7,26,2,27,7,27,2,28,7,28,2,29,7,29,2,30,7,30,2,
31,7,31,2,32,7,32,2,33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,2,37,7,37,1,0,
1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,1,5,1,5,1,5,3,5,91,8,5,1,6,1,6,1,6,
1,7,1,7,1,7,1,8,1,8,1,9,1,9,1,9,1,10,1,10,1,11,1,11,1,11,1,12,1,12,1,12,
1,12,1,12,1,13,1,13,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,1,14,1,14,1,
14,1,14,1,15,1,15,1,15,1,15,1,15,1,15,3,15,134,8,15,1,16,1,16,1,16,1,16,
1,16,1,16,1,16,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,3,17,151,8,17,1,
18,1,18,1,18,1,19,1,19,1,19,1,19,1,20,1,20,1,20,1,20,1,21,1,21,1,21,1,22,
1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,23,1,23,1,23,1,23,1,24,1,24,1,
24,1,24,1,24,1,24,1,24,1,25,1,25,1,25,1,25,1,25,1,25,1,25,1,26,1,26,1,26,
1,26,1,26,1,26,1,26,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,3,27,210,
8,27,1,28,1,28,1,29,3,29,215,8,29,1,29,4,29,218,8,29,11,29,12,29,219,1,
29,1,29,5,29,224,8,29,10,29,12,29,227,9,29,3,29,229,8,29,1,29,1,29,3,29,
233,8,29,1,29,4,29,236,8,29,11,29,12,29,237,3,29,240,8,29,1,29,3,29,243,
8,29,1,29,1,29,4,29,247,8,29,11,29,12,29,248,1,29,1,29,3,29,253,8,29,1,
29,4,29,256,8,29,11,29,12,29,257,3,29,260,8,29,3,29,262,8,29,1,30,1,30,
1,30,1,30,5,30,268,8,30,10,30,12,30,271,9,30,1,30,1,30,1,30,1,30,1,30,5,
30,278,8,30,10,30,12,30,281,9,30,1,30,3,30,284,8,30,1,31,1,31,5,31,288,
8,31,10,31,12,31,291,9,31,1,32,1,32,1,32,1,33,1,33,1,33,1,33,1,34,1,34,
1,34,1,34,1,34,1,34,1,34,4,34,307,8,34,11,34,12,34,308,5,34,311,8,34,10,
34,12,34,314,9,34,1,35,4,35,317,8,35,11,35,12,35,318,1,35,1,35,1,36,1,36,
1,37,4,37,326,8,37,11,37,12,37,327,0,0,38,1,1,3,2,5,3,7,4,9,5,11,6,13,7,
15,8,17,9,19,10,21,11,23,12,25,13,27,14,29,15,31,16,33,17,35,18,37,19,39,
20,41,21,43,22,45,23,47,24,49,25,51,26,53,27,55,28,57,0,59,29,61,30,63,
0,65,0,67,0,69,31,71,32,73,0,75,33,1,0,29,2,0,76,76,108,108,2,0,73,73,105,
105,2,0,75,75,107,107,2,0,69,69,101,101,2,0,66,66,98,98,2,0,84,84,116,116,
2,0,87,87,119,119,2,0,78,78,110,110,2,0,88,88,120,120,2,0,83,83,115,115,
2,0,82,82,114,114,2,0,71,71,103,103,2,0,80,80,112,112,2,0,67,67,99,99,2,
0,79,79,111,111,2,0,65,65,97,97,2,0,68,68,100,100,2,0,72,72,104,104,2,0,
89,89,121,121,2,0,85,85,117,117,2,0,70,70,102,102,2,0,43,43,45,45,2,0,34,
34,92,92,2,0,39,39,92,92,4,0,35,36,64,90,95,95,97,123,7,0,35,36,45,45,47,
58,64,90,95,95,97,123,125,125,3,0,9,10,13,13,32,32,1,0,48,57,8,0,9,10,13,
13,32,34,39,41,44,44,60,62,91,91,93,93,353,0,1,1,0,0,0,0,3,1,0,0,0,0,5,
1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,11,1,0,0,0,0,13,1,0,0,0,0,15,1,0,0,0,
0,17,1,0,0,0,0,19,1,0,0,0,0,21,1,0,0,0,0,23,1,0,0,0,0,25,1,0,0,0,0,27,1,
0,0,0,0,29,1,0,0,0,0,31,1,0,0,0,0,33,1,0,0,0,0,35,1,0,0,0,0,37,1,0,0,0,
0,39,1,0,0,0,0,41,1,0,0,0,0,43,1,0,0,0,0,45,1,0,0,0,0,47,1,0,0,0,0,49,1,
0,0,0,0,51,1,0,0,0,0,53,1,0,0,0,0,55,1,0,0,0,0,59,1,0,0,0,0,61,1,0,0,0,
0,69,1,0,0,0,0,71,1,0,0,0,0,75,1,0,0,0,1,77,1,0,0,0,3,79,1,0,0,0,5,81,1,
0,0,0,7,83,1,0,0,0,9,85,1,0,0,0,11,90,1,0,0,0,13,92,1,0,0,0,15,95,1,0,0,
0,17,98,1,0,0,0,19,100,1,0,0,0,21,103,1,0,0,0,23,105,1,0,0,0,25,108,1,0,
0,0,27,113,1,0,0,0,29,119,1,0,0,0,31,127,1,0,0,0,33,135,1,0,0,0,35,142,
1,0,0,0,37,152,1,0,0,0,39,155,1,0,0,0,41,159,1,0,0,0,43,163,1,0,0,0,45,
166,1,0,0,0,47,175,1,0,0,0,49,179,1,0,0,0,51,186,1,0,0,0,53,193,1,0,0,0,
55,209,1,0,0,0,57,211,1,0,0,0,59,261,1,0,0,0,61,283,1,0,0,0,63,285,1,0,
0,0,65,292,1,0,0,0,67,295,1,0,0,0,69,299,1,0,0,0,71,316,1,0,0,0,73,322,
1,0,0,0,75,325,1,0,0,0,77,78,5,40,0,0,78,2,1,0,0,0,79,80,5,41,0,0,80,4,
1,0,0,0,81,82,5,91,0,0,82,6,1,0,0,0,83,84,5,93,0,0,84,8,1,0,0,0,85,86,5,
44,0,0,86,10,1,0,0,0,87,91,5,61,0,0,88,89,5,61,0,0,89,91,5,61,0,0,90,87,
1,0,0,0,90,88,1,0,0,0,91,12,1,0,0,0,92,93,5,33,0,0,93,94,5,61,0,0,94,14,
1,0,0,0,95,96,5,60,0,0,96,97,5,62,0,0,97,16,1,0,0,0,98,99,5,60,0,0,99,18,
1,0,0,0,100,101,5,60,0,0,101,102,5,61,0,0,102,20,1,0,0,0,103,104,5,62,0,
0,104,22,1,0,0,0,105,106,5,62,0,0,106,107,5,61,0,0,107,24,1,0,0,0,108,109,
7,0,0,0,109,110,7,1,0,0,110,111,7,2,0,0,111,112,7,3,0,0,112,26,1,0,0,0,
113,114,7,1,0,0,114,115,7,0,0,0,115,116,7,1,0,0,116,117,7,2,0,0,117,118,
7,3,0,0,118,28,1,0,0,0,119,120,7,4,0,0,120,121,7,3,0,0,121,122,7,5,0,0,
122,123,7,6,0,0,123,124,7,3,0,0,124,125,7,3,0,0,125,126,7,7,0,0,126,30,
1,0,0,0,127,128,7,3,0,0,128,129,7,8,0,0,129,130,7,1,0,0,130,131,7,9,0,0,
131,133,7,5,0,0,132,134,7,9,0,0,133,132,1,0,0,0,133,134,1,0,0,0,134,32,
1,0,0,0,135,136,7,10,0,0,136,137,7,3,0,0,137,138,7,11,0,0,138,139,7,3,0,
0,139,140,7,8,0,0,140,141,7,12,0,0,141,34,1,0,0,0,142,143,7,13,0,0,143,
144,7,14,0,0,144,145,7,7,0,0,145,146,7,5,0,0,146,147,7,15,0,0,147,148,7,
1,0,0,148,150,7,7,0,0,149,151,7,9,0,0,150,149,1,0,0,0,150,151,1,0,0,0,151,
36,1,0,0,0,152,153,7,1,0,0,153,154,7,7,0,0,154,38,1,0,0,0,155,156,7,7,0,
0,156,157,7,14,0,0,157,158,7,5,0,0,158,40,1,0,0,0,159,160,7,15,0,0,160,
161,7,7,0,0,161,162,7,16,0,0,162,42,1,0,0,0,163,164,7,14,0,0,164,165,7,
10,0,0,165,44,1,0,0,0,166,167,7,17,0,0,167,168,7,15,0,0,168,169,7,9,0,0,
169,170,7,5,0,0,170,171,7,14,0,0,171,172,7,2,0,0,172,173,7,3,0,0,173,174,
7,7,0,0,174,46,1,0,0,0,175,176,7,17,0,0,176,177,7,15,0,0,177,178,7,9,0,
0,178,48,1,0,0,0,179,180,7,17,0,0,180,181,7,15,0,0,181,182,7,9,0,0,182,
183,7,15,0,0,183,184,7,7,0,0,184,185,7,18,0,0,185,50,1,0,0,0,186,187,7,
17,0,0,187,188,7,15,0,0,188,189,7,9,0,0,189,190,7,15,0,0,190,191,7,0,0,
0,191,192,7,0,0,0,192,52,1,0,0,0,193,194,7,9,0,0,194,195,7,3,0,0,195,196,
7,15,0,0,196,197,7,10,0,0,197,198,7,13,0,0,198,199,7,17,0,0,199,54,1,0,
0,0,200,201,7,5,0,0,201,202,7,10,0,0,202,203,7,19,0,0,203,210,7,3,0,0,204,
205,7,20,0,0,205,206,7,15,0,0,206,207,7,0,0,0,207,208,7,9,0,0,208,210,7,
3,0,0,209,200,1,0,0,0,209,204,1,0,0,0,210,56,1,0,0,0,211,212,7,21,0,0,212,
58,1,0,0,0,213,215,3,57,28,0,214,213,1,0,0,0,214,215,1,0,0,0,215,217,1,
0,0,0,216,218,3,73,36,0,217,216,1,0,0,0,218,219,1,0,0,0,219,217,1,0,0,0,
219,220,1,0,0,0,220,228,1,0,0,0,221,225,5,46,0,0,222,224,3,73,36,0,223,
222,1,0,0,0,224,227,1,0,0,0,225,223,1,0,0,0,225,226,1,0,0,0,226,229,1,0,
0,0,227,225,1,0,0,0,228,221,1,0,0,0,228,229,1,0,0,0,229,239,1,0,0,0,230,
232,7,3,0,0,231,233,3,57,28,0,232,231,1,0,0,0,232,233,1,0,0,0,233,235,1,
0,0,0,234,236,3,73,36,0,235,234,1,0,0,0,236,237,1,0,0,0,237,235,1,0,0,0,
237,238,1,0,0,0,238,240,1,0,0,0,239,230,1,0,0,0,239,240,1,0,0,0,240,262,
1,0,0,0,241,243,3,57,28,0,242,241,1,0,0,0,242,243,1,0,0,0,243,244,1,0,0,
0,244,246,5,46,0,0,245,247,3,73,36,0,246,245,1,0,0,0,247,248,1,0,0,0,248,
246,1,0,0,0,248,249,1,0,0,0,249,259,1,0,0,0,250,252,7,3,0,0,251,253,3,57,
28,0,252,251,1,0,0,0,252,253,1,0,0,0,253,255,1,0,0,0,254,256,3,73,36,0,
255,254,1,0,0,0,256,257,1,0,0,0,257,255,1,0,0,0,257,258,1,0,0,0,258,260,
1,0,0,0,259,250,1,0,0,0,259,260,1,0,0,0,260,262,1,0,0,0,261,214,1,0,0,0,
261,242,1,0,0,0,262,60,1,0,0,0,263,269,5,34,0,0,264,268,8,22,0,0,265,266,
5,92,0,0,266,268,9,0,0,0,267,264,1,0,0,0,267,265,1,0,0,0,268,271,1,0,0,
0,269,267,1,0,0,0,269,270,1,0,0,0,270,272,1,0,0,0,271,269,1,0,0,0,272,284,
5,34,0,0,273,279,5,39,0,0,274,278,8,23,0,0,275,276,5,92,0,0,276,278,9,0,
0,0,277,274,1,0,0,0,277,275,1,0,0,0,278,281,1,0,0,0,279,277,1,0,0,0,279,
280,1,0,0,0,280,282,1,0,0,0,281,279,1,0,0,0,282,284,5,39,0,0,283,263,1,
0,0,0,283,273,1,0,0,0,284,62,1,0,0,0,285,289,7,24,0,0,286,288,7,25,0,0,
287,286,1,0,0,0,288,291,1,0,0,0,289,287,1,0,0,0,289,290,1,0,0,0,290,64,
1,0,0,0,291,289,1,0,0,0,292,293,5,91,0,0,293,294,5,93,0,0,294,66,1,0,0,
0,295,296,5,91,0,0,296,297,5,42,0,0,297,298,5,93,0,0,298,68,1,0,0,0,299,
312,3,63,31,0,300,301,5,46,0,0,301,311,3,63,31,0,302,311,3,65,32,0,303,
311,3,67,33,0,304,306,5,46,0,0,305,307,3,73,36,0,306,305,1,0,0,0,307,308,
1,0,0,0,308,306,1,0,0,0,308,309,1,0,0,0,309,311,1,0,0,0,310,300,1,0,0,0,
310,302,1,0,0,0,310,303,1,0,0,0,310,304,1,0,0,0,311,314,1,0,0,0,312,310,
1,0,0,0,312,313,1,0,0,0,313,70,1,0,0,0,314,312,1,0,0,0,315,317,7,26,0,0,
316,315,1,0,0,0,317,318,1,0,0,0,318,316,1,0,0,0,318,319,1,0,0,0,319,320,
1,0,0,0,320,321,6,35,0,0,321,72,1,0,0,0,322,323,7,27,0,0,323,74,1,0,0,0,
324,326,8,28,0,0,325,324,1,0,0,0,326,327,1,0,0,0,327,325,1,0,0,0,327,328,
1,0,0,0,328,76,1,0,0,0,29,0,90,133,150,209,214,219,225,228,232,237,239,
242,248,252,257,259,261,267,269,277,279,283,289,308,310,312,318,327,1,6,
0,0];
31,7,31,2,32,7,32,2,33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,1,0,1,0,1,1,1,
1,1,2,1,2,1,3,1,3,1,4,1,4,1,5,1,5,1,5,3,5,89,8,5,1,6,1,6,1,6,1,7,1,7,1,
7,1,8,1,8,1,9,1,9,1,9,1,10,1,10,1,11,1,11,1,11,1,12,1,12,1,12,1,12,1,12,
1,13,1,13,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,1,14,1,14,1,14,1,14,1,
15,1,15,1,15,1,15,1,15,1,15,3,15,132,8,15,1,16,1,16,1,16,1,16,1,16,1,16,
1,16,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,3,17,149,8,17,1,18,1,18,1,
18,1,19,1,19,1,19,1,19,1,20,1,20,1,20,1,20,1,21,1,21,1,21,1,22,1,22,1,22,
1,22,1,22,1,22,1,22,1,22,1,22,1,23,1,23,1,23,1,23,1,24,1,24,1,24,1,24,1,
24,1,24,1,24,1,25,1,25,1,25,1,25,1,25,1,25,1,25,1,26,1,26,1,26,1,26,1,26,
1,26,1,26,1,26,1,26,3,26,201,8,26,1,27,1,27,1,28,3,28,206,8,28,1,28,4,28,
209,8,28,11,28,12,28,210,1,28,1,28,5,28,215,8,28,10,28,12,28,218,9,28,3,
28,220,8,28,1,28,1,28,3,28,224,8,28,1,28,4,28,227,8,28,11,28,12,28,228,
3,28,231,8,28,1,28,3,28,234,8,28,1,28,1,28,4,28,238,8,28,11,28,12,28,239,
1,28,1,28,3,28,244,8,28,1,28,4,28,247,8,28,11,28,12,28,248,3,28,251,8,28,
3,28,253,8,28,1,29,1,29,1,29,1,29,5,29,259,8,29,10,29,12,29,262,9,29,1,
29,1,29,1,29,1,29,1,29,5,29,269,8,29,10,29,12,29,272,9,29,1,29,3,29,275,
8,29,1,30,1,30,5,30,279,8,30,10,30,12,30,282,9,30,1,31,1,31,1,31,1,32,1,
32,1,32,1,32,1,33,1,33,1,33,1,33,1,33,1,33,1,33,4,33,298,8,33,11,33,12,
33,299,5,33,302,8,33,10,33,12,33,305,9,33,1,34,4,34,308,8,34,11,34,12,34,
309,1,34,1,34,1,35,1,35,1,36,4,36,317,8,36,11,36,12,36,318,0,0,37,1,1,3,
2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21,11,23,12,25,13,27,14,29,15,31,
16,33,17,35,18,37,19,39,20,41,21,43,22,45,23,47,24,49,25,51,26,53,27,55,
0,57,28,59,29,61,0,63,0,65,0,67,30,69,31,71,0,73,32,1,0,29,2,0,76,76,108,
108,2,0,73,73,105,105,2,0,75,75,107,107,2,0,69,69,101,101,2,0,66,66,98,
98,2,0,84,84,116,116,2,0,87,87,119,119,2,0,78,78,110,110,2,0,88,88,120,
120,2,0,83,83,115,115,2,0,82,82,114,114,2,0,71,71,103,103,2,0,80,80,112,
112,2,0,67,67,99,99,2,0,79,79,111,111,2,0,65,65,97,97,2,0,68,68,100,100,
2,0,72,72,104,104,2,0,89,89,121,121,2,0,85,85,117,117,2,0,70,70,102,102,
2,0,43,43,45,45,2,0,34,34,92,92,2,0,39,39,92,92,4,0,35,36,64,90,95,95,97,
123,7,0,35,36,45,45,47,58,64,90,95,95,97,123,125,125,3,0,9,10,13,13,32,
32,1,0,48,57,8,0,9,10,13,13,32,34,39,41,44,44,60,62,91,91,93,93,344,0,1,
1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,11,1,0,0,0,0,
13,1,0,0,0,0,15,1,0,0,0,0,17,1,0,0,0,0,19,1,0,0,0,0,21,1,0,0,0,0,23,1,0,
0,0,0,25,1,0,0,0,0,27,1,0,0,0,0,29,1,0,0,0,0,31,1,0,0,0,0,33,1,0,0,0,0,
35,1,0,0,0,0,37,1,0,0,0,0,39,1,0,0,0,0,41,1,0,0,0,0,43,1,0,0,0,0,45,1,0,
0,0,0,47,1,0,0,0,0,49,1,0,0,0,0,51,1,0,0,0,0,53,1,0,0,0,0,57,1,0,0,0,0,
59,1,0,0,0,0,67,1,0,0,0,0,69,1,0,0,0,0,73,1,0,0,0,1,75,1,0,0,0,3,77,1,0,
0,0,5,79,1,0,0,0,7,81,1,0,0,0,9,83,1,0,0,0,11,88,1,0,0,0,13,90,1,0,0,0,
15,93,1,0,0,0,17,96,1,0,0,0,19,98,1,0,0,0,21,101,1,0,0,0,23,103,1,0,0,0,
25,106,1,0,0,0,27,111,1,0,0,0,29,117,1,0,0,0,31,125,1,0,0,0,33,133,1,0,
0,0,35,140,1,0,0,0,37,150,1,0,0,0,39,153,1,0,0,0,41,157,1,0,0,0,43,161,
1,0,0,0,45,164,1,0,0,0,47,173,1,0,0,0,49,177,1,0,0,0,51,184,1,0,0,0,53,
200,1,0,0,0,55,202,1,0,0,0,57,252,1,0,0,0,59,274,1,0,0,0,61,276,1,0,0,0,
63,283,1,0,0,0,65,286,1,0,0,0,67,290,1,0,0,0,69,307,1,0,0,0,71,313,1,0,
0,0,73,316,1,0,0,0,75,76,5,40,0,0,76,2,1,0,0,0,77,78,5,41,0,0,78,4,1,0,
0,0,79,80,5,91,0,0,80,6,1,0,0,0,81,82,5,93,0,0,82,8,1,0,0,0,83,84,5,44,
0,0,84,10,1,0,0,0,85,89,5,61,0,0,86,87,5,61,0,0,87,89,5,61,0,0,88,85,1,
0,0,0,88,86,1,0,0,0,89,12,1,0,0,0,90,91,5,33,0,0,91,92,5,61,0,0,92,14,1,
0,0,0,93,94,5,60,0,0,94,95,5,62,0,0,95,16,1,0,0,0,96,97,5,60,0,0,97,18,
1,0,0,0,98,99,5,60,0,0,99,100,5,61,0,0,100,20,1,0,0,0,101,102,5,62,0,0,
102,22,1,0,0,0,103,104,5,62,0,0,104,105,5,61,0,0,105,24,1,0,0,0,106,107,
7,0,0,0,107,108,7,1,0,0,108,109,7,2,0,0,109,110,7,3,0,0,110,26,1,0,0,0,
111,112,7,1,0,0,112,113,7,0,0,0,113,114,7,1,0,0,114,115,7,2,0,0,115,116,
7,3,0,0,116,28,1,0,0,0,117,118,7,4,0,0,118,119,7,3,0,0,119,120,7,5,0,0,
120,121,7,6,0,0,121,122,7,3,0,0,122,123,7,3,0,0,123,124,7,7,0,0,124,30,
1,0,0,0,125,126,7,3,0,0,126,127,7,8,0,0,127,128,7,1,0,0,128,129,7,9,0,0,
129,131,7,5,0,0,130,132,7,9,0,0,131,130,1,0,0,0,131,132,1,0,0,0,132,32,
1,0,0,0,133,134,7,10,0,0,134,135,7,3,0,0,135,136,7,11,0,0,136,137,7,3,0,
0,137,138,7,8,0,0,138,139,7,12,0,0,139,34,1,0,0,0,140,141,7,13,0,0,141,
142,7,14,0,0,142,143,7,7,0,0,143,144,7,5,0,0,144,145,7,15,0,0,145,146,7,
1,0,0,146,148,7,7,0,0,147,149,7,9,0,0,148,147,1,0,0,0,148,149,1,0,0,0,149,
36,1,0,0,0,150,151,7,1,0,0,151,152,7,7,0,0,152,38,1,0,0,0,153,154,7,7,0,
0,154,155,7,14,0,0,155,156,7,5,0,0,156,40,1,0,0,0,157,158,7,15,0,0,158,
159,7,7,0,0,159,160,7,16,0,0,160,42,1,0,0,0,161,162,7,14,0,0,162,163,7,
10,0,0,163,44,1,0,0,0,164,165,7,17,0,0,165,166,7,15,0,0,166,167,7,9,0,0,
167,168,7,5,0,0,168,169,7,14,0,0,169,170,7,2,0,0,170,171,7,3,0,0,171,172,
7,7,0,0,172,46,1,0,0,0,173,174,7,17,0,0,174,175,7,15,0,0,175,176,7,9,0,
0,176,48,1,0,0,0,177,178,7,17,0,0,178,179,7,15,0,0,179,180,7,9,0,0,180,
181,7,15,0,0,181,182,7,7,0,0,182,183,7,18,0,0,183,50,1,0,0,0,184,185,7,
17,0,0,185,186,7,15,0,0,186,187,7,9,0,0,187,188,7,15,0,0,188,189,7,0,0,
0,189,190,7,0,0,0,190,52,1,0,0,0,191,192,7,5,0,0,192,193,7,10,0,0,193,194,
7,19,0,0,194,201,7,3,0,0,195,196,7,20,0,0,196,197,7,15,0,0,197,198,7,0,
0,0,198,199,7,9,0,0,199,201,7,3,0,0,200,191,1,0,0,0,200,195,1,0,0,0,201,
54,1,0,0,0,202,203,7,21,0,0,203,56,1,0,0,0,204,206,3,55,27,0,205,204,1,
0,0,0,205,206,1,0,0,0,206,208,1,0,0,0,207,209,3,71,35,0,208,207,1,0,0,0,
209,210,1,0,0,0,210,208,1,0,0,0,210,211,1,0,0,0,211,219,1,0,0,0,212,216,
5,46,0,0,213,215,3,71,35,0,214,213,1,0,0,0,215,218,1,0,0,0,216,214,1,0,
0,0,216,217,1,0,0,0,217,220,1,0,0,0,218,216,1,0,0,0,219,212,1,0,0,0,219,
220,1,0,0,0,220,230,1,0,0,0,221,223,7,3,0,0,222,224,3,55,27,0,223,222,1,
0,0,0,223,224,1,0,0,0,224,226,1,0,0,0,225,227,3,71,35,0,226,225,1,0,0,0,
227,228,1,0,0,0,228,226,1,0,0,0,228,229,1,0,0,0,229,231,1,0,0,0,230,221,
1,0,0,0,230,231,1,0,0,0,231,253,1,0,0,0,232,234,3,55,27,0,233,232,1,0,0,
0,233,234,1,0,0,0,234,235,1,0,0,0,235,237,5,46,0,0,236,238,3,71,35,0,237,
236,1,0,0,0,238,239,1,0,0,0,239,237,1,0,0,0,239,240,1,0,0,0,240,250,1,0,
0,0,241,243,7,3,0,0,242,244,3,55,27,0,243,242,1,0,0,0,243,244,1,0,0,0,244,
246,1,0,0,0,245,247,3,71,35,0,246,245,1,0,0,0,247,248,1,0,0,0,248,246,1,
0,0,0,248,249,1,0,0,0,249,251,1,0,0,0,250,241,1,0,0,0,250,251,1,0,0,0,251,
253,1,0,0,0,252,205,1,0,0,0,252,233,1,0,0,0,253,58,1,0,0,0,254,260,5,34,
0,0,255,259,8,22,0,0,256,257,5,92,0,0,257,259,9,0,0,0,258,255,1,0,0,0,258,
256,1,0,0,0,259,262,1,0,0,0,260,258,1,0,0,0,260,261,1,0,0,0,261,263,1,0,
0,0,262,260,1,0,0,0,263,275,5,34,0,0,264,270,5,39,0,0,265,269,8,23,0,0,
266,267,5,92,0,0,267,269,9,0,0,0,268,265,1,0,0,0,268,266,1,0,0,0,269,272,
1,0,0,0,270,268,1,0,0,0,270,271,1,0,0,0,271,273,1,0,0,0,272,270,1,0,0,0,
273,275,5,39,0,0,274,254,1,0,0,0,274,264,1,0,0,0,275,60,1,0,0,0,276,280,
7,24,0,0,277,279,7,25,0,0,278,277,1,0,0,0,279,282,1,0,0,0,280,278,1,0,0,
0,280,281,1,0,0,0,281,62,1,0,0,0,282,280,1,0,0,0,283,284,5,91,0,0,284,285,
5,93,0,0,285,64,1,0,0,0,286,287,5,91,0,0,287,288,5,42,0,0,288,289,5,93,
0,0,289,66,1,0,0,0,290,303,3,61,30,0,291,292,5,46,0,0,292,302,3,61,30,0,
293,302,3,63,31,0,294,302,3,65,32,0,295,297,5,46,0,0,296,298,3,71,35,0,
297,296,1,0,0,0,298,299,1,0,0,0,299,297,1,0,0,0,299,300,1,0,0,0,300,302,
1,0,0,0,301,291,1,0,0,0,301,293,1,0,0,0,301,294,1,0,0,0,301,295,1,0,0,0,
302,305,1,0,0,0,303,301,1,0,0,0,303,304,1,0,0,0,304,68,1,0,0,0,305,303,
1,0,0,0,306,308,7,26,0,0,307,306,1,0,0,0,308,309,1,0,0,0,309,307,1,0,0,
0,309,310,1,0,0,0,310,311,1,0,0,0,311,312,6,34,0,0,312,70,1,0,0,0,313,314,
7,27,0,0,314,72,1,0,0,0,315,317,8,28,0,0,316,315,1,0,0,0,317,318,1,0,0,
0,318,316,1,0,0,0,318,319,1,0,0,0,319,74,1,0,0,0,29,0,88,131,148,200,205,
210,216,219,223,228,230,233,239,243,248,250,252,258,260,268,270,274,280,
299,301,303,309,318,1,6,0,0];
private static __ATN: ATN;
public static get _ATN(): ATN {

View File

@@ -1,25 +1,25 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
import {ParseTreeListener} from "antlr4";
import { QueryContext } from "./FilterQueryParser.js";
import { ExpressionContext } from "./FilterQueryParser.js";
import { OrExpressionContext } from "./FilterQueryParser.js";
import { AndExpressionContext } from "./FilterQueryParser.js";
import { UnaryExpressionContext } from "./FilterQueryParser.js";
import { PrimaryContext } from "./FilterQueryParser.js";
import { ComparisonContext } from "./FilterQueryParser.js";
import { InClauseContext } from "./FilterQueryParser.js";
import { NotInClauseContext } from "./FilterQueryParser.js";
import { ValueListContext } from "./FilterQueryParser.js";
import { FullTextContext } from "./FilterQueryParser.js";
import { FunctionCallContext } from "./FilterQueryParser.js";
import { FunctionParamListContext } from "./FilterQueryParser.js";
import { FunctionParamContext } from "./FilterQueryParser.js";
import { ArrayContext } from "./FilterQueryParser.js";
import { ValueContext } from "./FilterQueryParser.js";
import { KeyContext } from "./FilterQueryParser.js";
import { QueryContext } from "./FilterQueryParser";
import { ExpressionContext } from "./FilterQueryParser";
import { OrExpressionContext } from "./FilterQueryParser";
import { AndExpressionContext } from "./FilterQueryParser";
import { UnaryExpressionContext } from "./FilterQueryParser";
import { PrimaryContext } from "./FilterQueryParser";
import { ComparisonContext } from "./FilterQueryParser";
import { InClauseContext } from "./FilterQueryParser";
import { NotInClauseContext } from "./FilterQueryParser";
import { ValueListContext } from "./FilterQueryParser";
import { FullTextContext } from "./FilterQueryParser";
import { FunctionCallContext } from "./FilterQueryParser";
import { FunctionParamListContext } from "./FilterQueryParser";
import { FunctionParamContext } from "./FilterQueryParser";
import { ArrayContext } from "./FilterQueryParser";
import { ValueContext } from "./FilterQueryParser";
import { KeyContext } from "./FilterQueryParser";
/**

View File

@@ -1,4 +1,4 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
// noinspection ES6UnusedImports,JSUnusedGlobalSymbols,JSUnusedLocalSymbols
import {
@@ -45,14 +45,13 @@ export default class FilterQueryParser extends Parser {
public static readonly HAS = 24;
public static readonly HASANY = 25;
public static readonly HASALL = 26;
public static readonly SEARCH = 27;
public static readonly BOOL = 28;
public static readonly NUMBER = 29;
public static readonly QUOTED_TEXT = 30;
public static readonly KEY = 31;
public static readonly WS = 32;
public static readonly FREETEXT = 33;
public static override readonly EOF = Token.EOF;
public static readonly BOOL = 27;
public static readonly NUMBER = 28;
public static readonly QUOTED_TEXT = 29;
public static readonly KEY = 30;
public static readonly WS = 31;
public static readonly FREETEXT = 32;
public static readonly EOF = Token.EOF;
public static readonly RULE_query = 0;
public static readonly RULE_expression = 1;
public static readonly RULE_orExpression = 2;
@@ -91,9 +90,8 @@ export default class FilterQueryParser extends Parser {
"AND", "OR",
"HASTOKEN",
"HAS", "HASANY",
"HASALL", "SEARCH",
"BOOL", "NUMBER",
"QUOTED_TEXT",
"HASALL", "BOOL",
"NUMBER", "QUOTED_TEXT",
"KEY", "WS",
"FREETEXT" ];
// tslint:disable:no-trailing-whitespace
@@ -224,7 +222,7 @@ export default class FilterQueryParser extends Parser {
this.state = 53;
this._errHandler.sync(this);
_la = this._input.LA(1);
while ((((_la) & ~0x1F) === 0 && ((1 << _la) & 4289724418) !== 0) || _la===33) {
while (((((_la - 1)) & ~0x1F) === 0 && ((1 << (_la - 1)) & 3218604033) !== 0)) {
{
this.state = 51;
this._errHandler.sync(this);
@@ -247,8 +245,7 @@ export default class FilterQueryParser extends Parser {
case 28:
case 29:
case 30:
case 31:
case 33:
case 32:
{
this.state = 50;
this.unaryExpression();
@@ -814,7 +811,7 @@ export default class FilterQueryParser extends Parser {
{
this.state = 190;
_la = this._input.LA(1);
if(!(_la===30 || _la===33)) {
if(!(_la===29 || _la===32)) {
this._errHandler.recoverInline(this);
}
else {
@@ -847,7 +844,7 @@ export default class FilterQueryParser extends Parser {
{
this.state = 192;
_la = this._input.LA(1);
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 260046848) !== 0))) {
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 125829120) !== 0))) {
this._errHandler.recoverInline(this);
}
else {
@@ -1002,7 +999,7 @@ export default class FilterQueryParser extends Parser {
{
this.state = 214;
_la = this._input.LA(1);
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 4026531840) !== 0))) {
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 2013265920) !== 0))) {
this._errHandler.recoverInline(this);
}
else {
@@ -1051,7 +1048,7 @@ export default class FilterQueryParser extends Parser {
return localctx;
}
public static readonly _serializedATN: number[] = [4,1,33,219,2,0,7,0,2,
public static readonly _serializedATN: number[] = [4,1,32,219,2,0,7,0,2,
1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,6,2,7,7,7,2,8,7,8,2,9,7,9,2,
10,7,10,2,11,7,11,2,12,7,12,2,13,7,13,2,14,7,14,2,15,7,15,2,16,7,16,1,0,
1,0,1,0,1,1,1,1,1,2,1,2,1,2,5,2,43,8,2,10,2,12,2,46,9,2,1,3,1,3,1,3,1,3,
@@ -1066,7 +1063,7 @@ export default class FilterQueryParser extends Parser {
10,9,12,9,189,9,9,1,10,1,10,1,11,1,11,1,11,1,11,1,11,1,12,1,12,1,12,5,12,
201,8,12,10,12,12,12,204,9,12,1,13,1,13,1,13,3,13,209,8,13,1,14,1,14,1,
14,1,14,1,15,1,15,1,16,1,16,1,16,0,0,17,0,2,4,6,8,10,12,14,16,18,20,22,
24,26,28,30,32,0,5,1,0,7,8,1,0,13,14,2,0,30,30,33,33,1,0,23,27,1,0,28,31,
24,26,28,30,32,0,5,1,0,7,8,1,0,13,14,2,0,29,29,32,32,1,0,23,26,1,0,27,30,
235,0,34,1,0,0,0,2,37,1,0,0,0,4,39,1,0,0,0,6,47,1,0,0,0,8,57,1,0,0,0,10,
70,1,0,0,0,12,149,1,0,0,0,14,163,1,0,0,0,16,180,1,0,0,0,18,182,1,0,0,0,
20,190,1,0,0,0,22,192,1,0,0,0,24,197,1,0,0,0,26,208,1,0,0,0,28,210,1,0,
@@ -1118,7 +1115,7 @@ export default class FilterQueryParser extends Parser {
0,0,205,209,3,32,16,0,206,209,3,30,15,0,207,209,3,28,14,0,208,205,1,0,0,
0,208,206,1,0,0,0,208,207,1,0,0,0,209,27,1,0,0,0,210,211,5,3,0,0,211,212,
3,18,9,0,212,213,5,4,0,0,213,29,1,0,0,0,214,215,7,4,0,0,215,31,1,0,0,0,
216,217,5,31,0,0,217,33,1,0,0,0,11,44,51,53,57,70,149,163,180,187,202,208];
216,217,5,30,0,0,217,33,1,0,0,0,11,44,51,53,57,70,149,163,180,187,202,208];
private static __ATN: ATN;
public static get _ATN(): ATN {
@@ -1665,9 +1662,6 @@ export class FunctionCallContext extends ParserRuleContext {
public HASALL(): TerminalNode {
return this.getToken(FilterQueryParser.HASALL, 0);
}
public SEARCH(): TerminalNode {
return this.getToken(FilterQueryParser.SEARCH, 0);
}
public get ruleIndex(): number {
return FilterQueryParser.RULE_functionCall;
}

View File

@@ -1,25 +1,25 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
import {ParseTreeVisitor} from 'antlr4';
import { QueryContext } from "./FilterQueryParser.js";
import { ExpressionContext } from "./FilterQueryParser.js";
import { OrExpressionContext } from "./FilterQueryParser.js";
import { AndExpressionContext } from "./FilterQueryParser.js";
import { UnaryExpressionContext } from "./FilterQueryParser.js";
import { PrimaryContext } from "./FilterQueryParser.js";
import { ComparisonContext } from "./FilterQueryParser.js";
import { InClauseContext } from "./FilterQueryParser.js";
import { NotInClauseContext } from "./FilterQueryParser.js";
import { ValueListContext } from "./FilterQueryParser.js";
import { FullTextContext } from "./FilterQueryParser.js";
import { FunctionCallContext } from "./FilterQueryParser.js";
import { FunctionParamListContext } from "./FilterQueryParser.js";
import { FunctionParamContext } from "./FilterQueryParser.js";
import { ArrayContext } from "./FilterQueryParser.js";
import { ValueContext } from "./FilterQueryParser.js";
import { KeyContext } from "./FilterQueryParser.js";
import { QueryContext } from "./FilterQueryParser";
import { ExpressionContext } from "./FilterQueryParser";
import { OrExpressionContext } from "./FilterQueryParser";
import { AndExpressionContext } from "./FilterQueryParser";
import { UnaryExpressionContext } from "./FilterQueryParser";
import { PrimaryContext } from "./FilterQueryParser";
import { ComparisonContext } from "./FilterQueryParser";
import { InClauseContext } from "./FilterQueryParser";
import { NotInClauseContext } from "./FilterQueryParser";
import { ValueListContext } from "./FilterQueryParser";
import { FullTextContext } from "./FilterQueryParser";
import { FunctionCallContext } from "./FilterQueryParser";
import { FunctionParamListContext } from "./FilterQueryParser";
import { FunctionParamContext } from "./FilterQueryParser";
import { ArrayContext } from "./FilterQueryParser";
import { ValueContext } from "./FilterQueryParser";
import { KeyContext } from "./FilterQueryParser";
/**

View File

@@ -107,7 +107,7 @@ fullText
* ...
*/
functionCall
: (HASTOKEN | HAS | HASANY | HASALL | SEARCH) LPAREN functionParamList RPAREN
: (HASTOKEN | HAS | HASANY | HASALL) LPAREN functionParamList RPAREN
;
// Function parameters can be keys, single scalar values, or arrays
@@ -184,7 +184,6 @@ HASTOKEN : [Hh][Aa][Ss][Tt][Oo][Kk][Ee][Nn];
HAS : [Hh][Aa][Ss] ;
HASANY : [Hh][Aa][Ss][Aa][Nn][Yy] ;
HASALL : [Hh][Aa][Ss][Aa][Ll][Ll] ;
SEARCH : [Ss][Ee][Aa][Rr][Cc][Hh] ;
// Potential boolean constants
BOOL

View File

@@ -4,8 +4,6 @@ import (
"errors" //nolint:depguard
"fmt"
"log/slog"
"go.opentelemetry.io/otel/attribute"
)
// base is the fundamental struct that implements the error interface.
@@ -255,10 +253,3 @@ func NewTimeoutf(code Code, format string, args ...any) *base {
func Attr(err error) slog.Attr {
return slog.Any("exception", err)
}
// TypeAttr returns an OTel attribute.KeyValue with the "error.type" semconv key
// set to the error's type string.
func TypeAttr(err error) attribute.KeyValue {
t, _, _, _, _, _ := Unwrapb(err)
return attribute.String("error.type", t.String())
}

View File

@@ -8,7 +8,6 @@ var (
FeatureHideRootUser = featuretypes.MustNewName("hide_root_user")
FeatureGetMetersFromZeus = featuretypes.MustNewName("get_meters_from_zeus")
FeaturePutMetersInZeus = featuretypes.MustNewName("put_meters_in_zeus")
FeatureUseMeterReporter = featuretypes.MustNewName("use_meter_reporter")
FeatureUseJSONBody = featuretypes.MustNewName("use_json_body")
)
@@ -54,14 +53,6 @@ func MustNewRegistry() featuretypes.Registry {
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureUseMeterReporter,
Kind: featuretypes.KindBoolean,
Stage: featuretypes.StageExperimental,
Description: "Controls whether the enterprise meter reporter runs instead of the noop reporter",
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureUseJSONBody,
Kind: featuretypes.KindBoolean,

View File

@@ -1,68 +0,0 @@
package metercollector
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
)
const (
ProviderStatic = "static"
ProviderTelemetry = "telemetry"
)
type Config struct {
Provider string `mapstructure:"provider"`
Telemetry TelemetryConfig `mapstructure:"telemetry"`
Static StaticConfig `mapstructure:"static"`
}
func (c Config) Validate() error {
switch c.Provider {
case ProviderStatic:
return c.Static.Validate()
case ProviderTelemetry:
return c.Telemetry.Validate()
default:
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidConfig, "meter collector: unknown provider %q", c.Provider)
}
}
type TelemetryConfig struct {
Name zeustypes.MeterName
Unit zeustypes.MeterUnit
Aggregation zeustypes.MeterAggregation
DBName string
TableName string
DefaultRetentionDays int
}
type StaticConfig struct {
Name zeustypes.MeterName
Unit zeustypes.MeterUnit
Aggregation zeustypes.MeterAggregation
Value int64
}
func (c StaticConfig) Validate() error {
if c.Name.IsZero() {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "static meter collector: name must be set")
}
return nil
}
func (c TelemetryConfig) Validate() error {
if c.Name.IsZero() {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: name must be set")
}
if c.DBName == "" || c.TableName == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: db_name and table_name are required")
}
if c.DefaultRetentionDays <= 0 {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: default_retention_days must be positive")
}
return nil
}

View File

@@ -1,25 +0,0 @@
package metercollector
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
ErrCodeMeterCollectorCollectFailed = errors.MustNewCode("meter_collector_collect_failed")
ErrCodeMeterCollectorInvalidCustomRetentionRule = errors.MustNewCode("meter_collector_invalid_custom_retention_rule")
ErrCodeInvalidConfig = errors.MustNewCode("meter_collector_invalid_config")
)
type MeterCollector interface {
Name() zeustypes.MeterName
Unit() zeustypes.MeterUnit
Aggregation() zeustypes.MeterAggregation
Origin(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, todayStart time.Time) (time.Time, error)
Collect(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error)
}

View File

@@ -1,37 +0,0 @@
package meterreporter
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
var _ factory.Config = (*Config)(nil)
type Config struct {
// Interval is how often the reporter collects and ships meters.
Interval time.Duration `mapstructure:"interval"`
// Backfill enables sealed-day catch-up from the license creation day.
Backfill bool `mapstructure:"backfill"`
}
func newConfig() factory.Config {
return Config{
Interval: 6 * time.Hour,
Backfill: true,
}
}
func NewConfigFactory() factory.ConfigFactory {
return factory.NewConfigFactory(factory.MustNewName("meterreporter"), newConfig)
}
func (c Config) Validate() error {
if c.Interval < 5*time.Minute || c.Interval > 24*time.Hour {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::interval must be between 5m and 24h")
}
return nil
}

View File

@@ -1,14 +0,0 @@
package meterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
var (
ErrCodeInvalidInput = errors.MustNewCode("meterreporter_invalid_input")
)
type Reporter interface {
factory.ServiceWithHealthy
}

View File

@@ -1,39 +0,0 @@
package noopmeterreporter
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/meterreporter"
)
type provider struct {
healthyC chan struct{}
stopC chan struct{}
}
func NewFactory() factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
return factory.NewProviderFactory(factory.MustNewName("noop"), New)
}
func New(_ context.Context, _ factory.ProviderSettings, _ meterreporter.Config) (meterreporter.Reporter, error) {
return &provider{
healthyC: make(chan struct{}),
stopC: make(chan struct{}),
}, nil
}
func (p *provider) Start(_ context.Context) error {
close(p.healthyC)
<-p.stopC
return nil
}
func (p *provider) Stop(_ context.Context) error {
close(p.stopC)
return nil
}
func (p *provider) Healthy() <-chan struct{} {
return p.healthyC
}

View File

@@ -1,52 +0,0 @@
package implretention
import (
"context"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type getter struct {
store retentiontypes.Store
}
// NewGetter creates a retention getter backed by the retention store.
func NewGetter(store retentiontypes.Store) retention.Getter {
return &getter{
store: store,
}
}
// GetRetentionPolicySegments loads successful TTL changes and converts them into retention policy segments.
func (getter *getter) GetRetentionPolicySegments(
ctx context.Context,
orgID valuer.UUID,
dbName string,
tableName string,
fallbackDefaultDays int,
startMs int64,
endMs int64,
) ([]*retentiontypes.RetentionPolicySegment, error) {
if startMs >= endMs {
return nil, nil
}
if dbName == "" {
return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dbName is empty")
}
if tableName == "" {
return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "tableName is empty")
}
if fallbackDefaultDays <= 0 {
return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "non-positive fallbackDefaultDays %d", fallbackDefaultDays)
}
rows, err := getter.store.ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx, orgID, dbName+"."+tableName, endMs)
if err != nil {
return nil, err
}
return retentiontypes.BuildRetentionPolicySegmentsFromRows(rows, fallbackDefaultDays, startMs, endMs)
}

View File

@@ -1,41 +0,0 @@
package implretention
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type store struct {
sqlstore sqlstore.SQLStore
}
// NewStore creates a SQL-backed retention store.
func NewStore(sqlstore sqlstore.SQLStore) retentiontypes.Store {
return &store{sqlstore: sqlstore}
}
// ListTTLSettingsByTableNameAndBeforeCreatedAt returns successful TTL settings before the given timestamp.
func (store *store) ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx context.Context, orgID valuer.UUID, tableName string, beforeMs int64) ([]*retentiontypes.TTLSetting, error) {
rows := []*retentiontypes.TTLSetting{}
err := store.
sqlstore.
BunDB().
NewSelect().
Model(&rows).
Where("table_name = ?", tableName).
Where("org_id = ?", orgID.StringValue()).
Where("status = ?", retentiontypes.TTLSettingStatusSuccess).
Where("created_at < ?", time.UnixMilli(beforeMs).UTC()).
OrderExpr("created_at ASC").
Scan(ctx)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "load ttl_setting rows for org %q table %q", orgID.StringValue(), tableName)
}
return rows, nil
}

View File

@@ -1,14 +0,0 @@
package retention
import (
"context"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// Getter resolves retention data and expressions for read paths.
type Getter interface {
// GetRetentionPolicySegments returns retention policy segments active over a half-open meter window.
GetRetentionPolicySegments(ctx context.Context, orgID valuer.UUID, dbName string, tableName string, fallbackDefaultDays int, startMs int64, endMs int64) ([]*retentiontypes.RetentionPolicySegment, error)
}

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

View File

@@ -4,9 +4,10 @@ package parser
import (
"fmt"
"github.com/antlr4-go/antlr/v4"
"sync"
"unicode"
"github.com/antlr4-go/antlr/v4"
)
// Suppress unused import error
@@ -50,174 +51,170 @@ func filterquerylexerLexerInit() {
"", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS",
"REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY",
"HASALL", "SEARCH", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
"HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
}
staticData.RuleNames = []string{
"LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS",
"REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY",
"HASALL", "SEARCH", "BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT",
"EMPTY_BRACKS", "OLD_JSON_BRACKS", "KEY", "WS", "DIGIT", "FREETEXT",
"HASALL", "BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT", "EMPTY_BRACKS",
"OLD_JSON_BRACKS", "KEY", "WS", "DIGIT", "FREETEXT",
}
staticData.PredictionContextCache = antlr.NewPredictionContextCache()
staticData.serializedATN = []int32{
4, 0, 33, 329, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
4, 0, 32, 320, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
7, 36, 2, 37, 7, 37, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1,
4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 91, 8, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7,
1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11,
1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15,
1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 134, 8, 15, 1, 16, 1, 16, 1, 16, 1,
16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17,
1, 17, 3, 17, 151, 8, 17, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1,
19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22,
1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1,
24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25,
1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1,
27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 3, 27, 210,
8, 27, 1, 28, 1, 28, 1, 29, 3, 29, 215, 8, 29, 1, 29, 4, 29, 218, 8, 29,
11, 29, 12, 29, 219, 1, 29, 1, 29, 5, 29, 224, 8, 29, 10, 29, 12, 29, 227,
9, 29, 3, 29, 229, 8, 29, 1, 29, 1, 29, 3, 29, 233, 8, 29, 1, 29, 4, 29,
236, 8, 29, 11, 29, 12, 29, 237, 3, 29, 240, 8, 29, 1, 29, 3, 29, 243,
8, 29, 1, 29, 1, 29, 4, 29, 247, 8, 29, 11, 29, 12, 29, 248, 1, 29, 1,
29, 3, 29, 253, 8, 29, 1, 29, 4, 29, 256, 8, 29, 11, 29, 12, 29, 257, 3,
29, 260, 8, 29, 3, 29, 262, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 268,
8, 30, 10, 30, 12, 30, 271, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5,
30, 278, 8, 30, 10, 30, 12, 30, 281, 9, 30, 1, 30, 3, 30, 284, 8, 30, 1,
31, 1, 31, 5, 31, 288, 8, 31, 10, 31, 12, 31, 291, 9, 31, 1, 32, 1, 32,
1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1,
34, 1, 34, 4, 34, 307, 8, 34, 11, 34, 12, 34, 308, 5, 34, 311, 8, 34, 10,
34, 12, 34, 314, 9, 34, 1, 35, 4, 35, 317, 8, 35, 11, 35, 12, 35, 318,
1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 4, 37, 326, 8, 37, 11, 37, 12, 37, 327,
0, 0, 38, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19,
7, 36, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5,
1, 5, 1, 5, 3, 5, 89, 8, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1,
8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1,
12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14,
1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1,
15, 1, 15, 3, 15, 132, 8, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 149,
8, 17, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1,
20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22,
1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1,
24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25,
1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 3, 26, 201,
8, 26, 1, 27, 1, 27, 1, 28, 3, 28, 206, 8, 28, 1, 28, 4, 28, 209, 8, 28,
11, 28, 12, 28, 210, 1, 28, 1, 28, 5, 28, 215, 8, 28, 10, 28, 12, 28, 218,
9, 28, 3, 28, 220, 8, 28, 1, 28, 1, 28, 3, 28, 224, 8, 28, 1, 28, 4, 28,
227, 8, 28, 11, 28, 12, 28, 228, 3, 28, 231, 8, 28, 1, 28, 3, 28, 234,
8, 28, 1, 28, 1, 28, 4, 28, 238, 8, 28, 11, 28, 12, 28, 239, 1, 28, 1,
28, 3, 28, 244, 8, 28, 1, 28, 4, 28, 247, 8, 28, 11, 28, 12, 28, 248, 3,
28, 251, 8, 28, 3, 28, 253, 8, 28, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 259,
8, 29, 10, 29, 12, 29, 262, 9, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5,
29, 269, 8, 29, 10, 29, 12, 29, 272, 9, 29, 1, 29, 3, 29, 275, 8, 29, 1,
30, 1, 30, 5, 30, 279, 8, 30, 10, 30, 12, 30, 282, 9, 30, 1, 31, 1, 31,
1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1,
33, 1, 33, 4, 33, 298, 8, 33, 11, 33, 12, 33, 299, 5, 33, 302, 8, 33, 10,
33, 12, 33, 305, 9, 33, 1, 34, 4, 34, 308, 8, 34, 11, 34, 12, 34, 309,
1, 34, 1, 34, 1, 35, 1, 35, 1, 36, 4, 36, 317, 8, 36, 11, 36, 12, 36, 318,
0, 0, 37, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19,
10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37,
19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55,
28, 57, 0, 59, 29, 61, 30, 63, 0, 65, 0, 67, 0, 69, 31, 71, 32, 73, 0,
75, 33, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105, 2, 0,
75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 66, 66, 98, 98, 2, 0, 84,
84, 116, 116, 2, 0, 87, 87, 119, 119, 2, 0, 78, 78, 110, 110, 2, 0, 88,
88, 120, 120, 2, 0, 83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71,
71, 103, 103, 2, 0, 80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 79, 79,
111, 111, 2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104,
104, 2, 0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102,
102, 2, 0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92,
4, 0, 35, 36, 64, 90, 95, 95, 97, 123, 7, 0, 35, 36, 45, 45, 47, 58, 64,
90, 95, 95, 97, 123, 125, 125, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57,
8, 0, 9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 353,
0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0,
0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0,
0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0,
0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1,
0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39,
1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0,
47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0,
0, 55, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 69, 1, 0, 0,
0, 0, 71, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 1, 77, 1, 0, 0, 0, 3, 79, 1, 0,
0, 0, 5, 81, 1, 0, 0, 0, 7, 83, 1, 0, 0, 0, 9, 85, 1, 0, 0, 0, 11, 90,
1, 0, 0, 0, 13, 92, 1, 0, 0, 0, 15, 95, 1, 0, 0, 0, 17, 98, 1, 0, 0, 0,
19, 100, 1, 0, 0, 0, 21, 103, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 108,
1, 0, 0, 0, 27, 113, 1, 0, 0, 0, 29, 119, 1, 0, 0, 0, 31, 127, 1, 0, 0,
0, 33, 135, 1, 0, 0, 0, 35, 142, 1, 0, 0, 0, 37, 152, 1, 0, 0, 0, 39, 155,
1, 0, 0, 0, 41, 159, 1, 0, 0, 0, 43, 163, 1, 0, 0, 0, 45, 166, 1, 0, 0,
0, 47, 175, 1, 0, 0, 0, 49, 179, 1, 0, 0, 0, 51, 186, 1, 0, 0, 0, 53, 193,
1, 0, 0, 0, 55, 209, 1, 0, 0, 0, 57, 211, 1, 0, 0, 0, 59, 261, 1, 0, 0,
0, 61, 283, 1, 0, 0, 0, 63, 285, 1, 0, 0, 0, 65, 292, 1, 0, 0, 0, 67, 295,
1, 0, 0, 0, 69, 299, 1, 0, 0, 0, 71, 316, 1, 0, 0, 0, 73, 322, 1, 0, 0,
0, 75, 325, 1, 0, 0, 0, 77, 78, 5, 40, 0, 0, 78, 2, 1, 0, 0, 0, 79, 80,
5, 41, 0, 0, 80, 4, 1, 0, 0, 0, 81, 82, 5, 91, 0, 0, 82, 6, 1, 0, 0, 0,
83, 84, 5, 93, 0, 0, 84, 8, 1, 0, 0, 0, 85, 86, 5, 44, 0, 0, 86, 10, 1,
0, 0, 0, 87, 91, 5, 61, 0, 0, 88, 89, 5, 61, 0, 0, 89, 91, 5, 61, 0, 0,
90, 87, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 91, 12, 1, 0, 0, 0, 92, 93, 5,
33, 0, 0, 93, 94, 5, 61, 0, 0, 94, 14, 1, 0, 0, 0, 95, 96, 5, 60, 0, 0,
96, 97, 5, 62, 0, 0, 97, 16, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 18, 1,
0, 0, 0, 100, 101, 5, 60, 0, 0, 101, 102, 5, 61, 0, 0, 102, 20, 1, 0, 0,
0, 103, 104, 5, 62, 0, 0, 104, 22, 1, 0, 0, 0, 105, 106, 5, 62, 0, 0, 106,
107, 5, 61, 0, 0, 107, 24, 1, 0, 0, 0, 108, 109, 7, 0, 0, 0, 109, 110,
7, 1, 0, 0, 110, 111, 7, 2, 0, 0, 111, 112, 7, 3, 0, 0, 112, 26, 1, 0,
0, 0, 113, 114, 7, 1, 0, 0, 114, 115, 7, 0, 0, 0, 115, 116, 7, 1, 0, 0,
116, 117, 7, 2, 0, 0, 117, 118, 7, 3, 0, 0, 118, 28, 1, 0, 0, 0, 119, 120,
7, 4, 0, 0, 120, 121, 7, 3, 0, 0, 121, 122, 7, 5, 0, 0, 122, 123, 7, 6,
0, 0, 123, 124, 7, 3, 0, 0, 124, 125, 7, 3, 0, 0, 125, 126, 7, 7, 0, 0,
126, 30, 1, 0, 0, 0, 127, 128, 7, 3, 0, 0, 128, 129, 7, 8, 0, 0, 129, 130,
7, 1, 0, 0, 130, 131, 7, 9, 0, 0, 131, 133, 7, 5, 0, 0, 132, 134, 7, 9,
0, 0, 133, 132, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0, 134, 32, 1, 0, 0, 0,
135, 136, 7, 10, 0, 0, 136, 137, 7, 3, 0, 0, 137, 138, 7, 11, 0, 0, 138,
139, 7, 3, 0, 0, 139, 140, 7, 8, 0, 0, 140, 141, 7, 12, 0, 0, 141, 34,
1, 0, 0, 0, 142, 143, 7, 13, 0, 0, 143, 144, 7, 14, 0, 0, 144, 145, 7,
7, 0, 0, 145, 146, 7, 5, 0, 0, 146, 147, 7, 15, 0, 0, 147, 148, 7, 1, 0,
0, 148, 150, 7, 7, 0, 0, 149, 151, 7, 9, 0, 0, 150, 149, 1, 0, 0, 0, 150,
151, 1, 0, 0, 0, 151, 36, 1, 0, 0, 0, 152, 153, 7, 1, 0, 0, 153, 154, 7,
7, 0, 0, 154, 38, 1, 0, 0, 0, 155, 156, 7, 7, 0, 0, 156, 157, 7, 14, 0,
0, 157, 158, 7, 5, 0, 0, 158, 40, 1, 0, 0, 0, 159, 160, 7, 15, 0, 0, 160,
161, 7, 7, 0, 0, 161, 162, 7, 16, 0, 0, 162, 42, 1, 0, 0, 0, 163, 164,
7, 14, 0, 0, 164, 165, 7, 10, 0, 0, 165, 44, 1, 0, 0, 0, 166, 167, 7, 17,
0, 0, 167, 168, 7, 15, 0, 0, 168, 169, 7, 9, 0, 0, 169, 170, 7, 5, 0, 0,
170, 171, 7, 14, 0, 0, 171, 172, 7, 2, 0, 0, 172, 173, 7, 3, 0, 0, 173,
174, 7, 7, 0, 0, 174, 46, 1, 0, 0, 0, 175, 176, 7, 17, 0, 0, 176, 177,
7, 15, 0, 0, 177, 178, 7, 9, 0, 0, 178, 48, 1, 0, 0, 0, 179, 180, 7, 17,
0, 0, 180, 181, 7, 15, 0, 0, 181, 182, 7, 9, 0, 0, 182, 183, 7, 15, 0,
0, 183, 184, 7, 7, 0, 0, 184, 185, 7, 18, 0, 0, 185, 50, 1, 0, 0, 0, 186,
187, 7, 17, 0, 0, 187, 188, 7, 15, 0, 0, 188, 189, 7, 9, 0, 0, 189, 190,
7, 15, 0, 0, 190, 191, 7, 0, 0, 0, 191, 192, 7, 0, 0, 0, 192, 52, 1, 0,
0, 0, 193, 194, 7, 9, 0, 0, 194, 195, 7, 3, 0, 0, 195, 196, 7, 15, 0, 0,
196, 197, 7, 10, 0, 0, 197, 198, 7, 13, 0, 0, 198, 199, 7, 17, 0, 0, 199,
54, 1, 0, 0, 0, 200, 201, 7, 5, 0, 0, 201, 202, 7, 10, 0, 0, 202, 203,
7, 19, 0, 0, 203, 210, 7, 3, 0, 0, 204, 205, 7, 20, 0, 0, 205, 206, 7,
15, 0, 0, 206, 207, 7, 0, 0, 0, 207, 208, 7, 9, 0, 0, 208, 210, 7, 3, 0,
0, 209, 200, 1, 0, 0, 0, 209, 204, 1, 0, 0, 0, 210, 56, 1, 0, 0, 0, 211,
212, 7, 21, 0, 0, 212, 58, 1, 0, 0, 0, 213, 215, 3, 57, 28, 0, 214, 213,
1, 0, 0, 0, 214, 215, 1, 0, 0, 0, 215, 217, 1, 0, 0, 0, 216, 218, 3, 73,
36, 0, 217, 216, 1, 0, 0, 0, 218, 219, 1, 0, 0, 0, 219, 217, 1, 0, 0, 0,
219, 220, 1, 0, 0, 0, 220, 228, 1, 0, 0, 0, 221, 225, 5, 46, 0, 0, 222,
224, 3, 73, 36, 0, 223, 222, 1, 0, 0, 0, 224, 227, 1, 0, 0, 0, 225, 223,
1, 0, 0, 0, 225, 226, 1, 0, 0, 0, 226, 229, 1, 0, 0, 0, 227, 225, 1, 0,
0, 0, 228, 221, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 239, 1, 0, 0, 0,
230, 232, 7, 3, 0, 0, 231, 233, 3, 57, 28, 0, 232, 231, 1, 0, 0, 0, 232,
233, 1, 0, 0, 0, 233, 235, 1, 0, 0, 0, 234, 236, 3, 73, 36, 0, 235, 234,
1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 235, 1, 0, 0, 0, 237, 238, 1, 0,
0, 0, 238, 240, 1, 0, 0, 0, 239, 230, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0,
240, 262, 1, 0, 0, 0, 241, 243, 3, 57, 28, 0, 242, 241, 1, 0, 0, 0, 242,
243, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 246, 5, 46, 0, 0, 245, 247,
3, 73, 36, 0, 246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1,
0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 259, 1, 0, 0, 0, 250, 252, 7, 3, 0,
0, 251, 253, 3, 57, 28, 0, 252, 251, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
253, 255, 1, 0, 0, 0, 254, 256, 3, 73, 36, 0, 255, 254, 1, 0, 0, 0, 256,
257, 1, 0, 0, 0, 257, 255, 1, 0, 0, 0, 257, 258, 1, 0, 0, 0, 258, 260,
1, 0, 0, 0, 259, 250, 1, 0, 0, 0, 259, 260, 1, 0, 0, 0, 260, 262, 1, 0,
0, 0, 261, 214, 1, 0, 0, 0, 261, 242, 1, 0, 0, 0, 262, 60, 1, 0, 0, 0,
263, 269, 5, 34, 0, 0, 264, 268, 8, 22, 0, 0, 265, 266, 5, 92, 0, 0, 266,
268, 9, 0, 0, 0, 267, 264, 1, 0, 0, 0, 267, 265, 1, 0, 0, 0, 268, 271,
1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0,
0, 0, 271, 269, 1, 0, 0, 0, 272, 284, 5, 34, 0, 0, 273, 279, 5, 39, 0,
0, 274, 278, 8, 23, 0, 0, 275, 276, 5, 92, 0, 0, 276, 278, 9, 0, 0, 0,
277, 274, 1, 0, 0, 0, 277, 275, 1, 0, 0, 0, 278, 281, 1, 0, 0, 0, 279,
277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 282, 1, 0, 0, 0, 281, 279,
1, 0, 0, 0, 282, 284, 5, 39, 0, 0, 283, 263, 1, 0, 0, 0, 283, 273, 1, 0,
0, 0, 284, 62, 1, 0, 0, 0, 285, 289, 7, 24, 0, 0, 286, 288, 7, 25, 0, 0,
287, 286, 1, 0, 0, 0, 288, 291, 1, 0, 0, 0, 289, 287, 1, 0, 0, 0, 289,
290, 1, 0, 0, 0, 290, 64, 1, 0, 0, 0, 291, 289, 1, 0, 0, 0, 292, 293, 5,
91, 0, 0, 293, 294, 5, 93, 0, 0, 294, 66, 1, 0, 0, 0, 295, 296, 5, 91,
0, 0, 296, 297, 5, 42, 0, 0, 297, 298, 5, 93, 0, 0, 298, 68, 1, 0, 0, 0,
299, 312, 3, 63, 31, 0, 300, 301, 5, 46, 0, 0, 301, 311, 3, 63, 31, 0,
302, 311, 3, 65, 32, 0, 303, 311, 3, 67, 33, 0, 304, 306, 5, 46, 0, 0,
305, 307, 3, 73, 36, 0, 306, 305, 1, 0, 0, 0, 307, 308, 1, 0, 0, 0, 308,
306, 1, 0, 0, 0, 308, 309, 1, 0, 0, 0, 309, 311, 1, 0, 0, 0, 310, 300,
1, 0, 0, 0, 310, 302, 1, 0, 0, 0, 310, 303, 1, 0, 0, 0, 310, 304, 1, 0,
0, 0, 311, 314, 1, 0, 0, 0, 312, 310, 1, 0, 0, 0, 312, 313, 1, 0, 0, 0,
313, 70, 1, 0, 0, 0, 314, 312, 1, 0, 0, 0, 315, 317, 7, 26, 0, 0, 316,
315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 316, 1, 0, 0, 0, 318, 319,
1, 0, 0, 0, 319, 320, 1, 0, 0, 0, 320, 321, 6, 35, 0, 0, 321, 72, 1, 0,
0, 0, 322, 323, 7, 27, 0, 0, 323, 74, 1, 0, 0, 0, 324, 326, 8, 28, 0, 0,
325, 324, 1, 0, 0, 0, 326, 327, 1, 0, 0, 0, 327, 325, 1, 0, 0, 0, 327,
328, 1, 0, 0, 0, 328, 76, 1, 0, 0, 0, 29, 0, 90, 133, 150, 209, 214, 219,
225, 228, 232, 237, 239, 242, 248, 252, 257, 259, 261, 267, 269, 277, 279,
283, 289, 308, 310, 312, 318, 327, 1, 6, 0, 0,
0, 57, 28, 59, 29, 61, 0, 63, 0, 65, 0, 67, 30, 69, 31, 71, 0, 73, 32,
1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105, 2, 0, 75, 75,
107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 66, 66, 98, 98, 2, 0, 84, 84, 116,
116, 2, 0, 87, 87, 119, 119, 2, 0, 78, 78, 110, 110, 2, 0, 88, 88, 120,
120, 2, 0, 83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103,
103, 2, 0, 80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 79, 79, 111, 111,
2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104, 104, 2,
0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102, 102, 2,
0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92, 4, 0, 35,
36, 64, 90, 95, 95, 97, 123, 7, 0, 35, 36, 45, 45, 47, 58, 64, 90, 95,
95, 97, 123, 125, 125, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 8, 0,
9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 344, 0,
1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0,
9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0,
0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0,
0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0,
0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1,
0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47,
1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0,
57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0,
0, 73, 1, 0, 0, 0, 1, 75, 1, 0, 0, 0, 3, 77, 1, 0, 0, 0, 5, 79, 1, 0, 0,
0, 7, 81, 1, 0, 0, 0, 9, 83, 1, 0, 0, 0, 11, 88, 1, 0, 0, 0, 13, 90, 1,
0, 0, 0, 15, 93, 1, 0, 0, 0, 17, 96, 1, 0, 0, 0, 19, 98, 1, 0, 0, 0, 21,
101, 1, 0, 0, 0, 23, 103, 1, 0, 0, 0, 25, 106, 1, 0, 0, 0, 27, 111, 1,
0, 0, 0, 29, 117, 1, 0, 0, 0, 31, 125, 1, 0, 0, 0, 33, 133, 1, 0, 0, 0,
35, 140, 1, 0, 0, 0, 37, 150, 1, 0, 0, 0, 39, 153, 1, 0, 0, 0, 41, 157,
1, 0, 0, 0, 43, 161, 1, 0, 0, 0, 45, 164, 1, 0, 0, 0, 47, 173, 1, 0, 0,
0, 49, 177, 1, 0, 0, 0, 51, 184, 1, 0, 0, 0, 53, 200, 1, 0, 0, 0, 55, 202,
1, 0, 0, 0, 57, 252, 1, 0, 0, 0, 59, 274, 1, 0, 0, 0, 61, 276, 1, 0, 0,
0, 63, 283, 1, 0, 0, 0, 65, 286, 1, 0, 0, 0, 67, 290, 1, 0, 0, 0, 69, 307,
1, 0, 0, 0, 71, 313, 1, 0, 0, 0, 73, 316, 1, 0, 0, 0, 75, 76, 5, 40, 0,
0, 76, 2, 1, 0, 0, 0, 77, 78, 5, 41, 0, 0, 78, 4, 1, 0, 0, 0, 79, 80, 5,
91, 0, 0, 80, 6, 1, 0, 0, 0, 81, 82, 5, 93, 0, 0, 82, 8, 1, 0, 0, 0, 83,
84, 5, 44, 0, 0, 84, 10, 1, 0, 0, 0, 85, 89, 5, 61, 0, 0, 86, 87, 5, 61,
0, 0, 87, 89, 5, 61, 0, 0, 88, 85, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 89,
12, 1, 0, 0, 0, 90, 91, 5, 33, 0, 0, 91, 92, 5, 61, 0, 0, 92, 14, 1, 0,
0, 0, 93, 94, 5, 60, 0, 0, 94, 95, 5, 62, 0, 0, 95, 16, 1, 0, 0, 0, 96,
97, 5, 60, 0, 0, 97, 18, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 100, 5, 61,
0, 0, 100, 20, 1, 0, 0, 0, 101, 102, 5, 62, 0, 0, 102, 22, 1, 0, 0, 0,
103, 104, 5, 62, 0, 0, 104, 105, 5, 61, 0, 0, 105, 24, 1, 0, 0, 0, 106,
107, 7, 0, 0, 0, 107, 108, 7, 1, 0, 0, 108, 109, 7, 2, 0, 0, 109, 110,
7, 3, 0, 0, 110, 26, 1, 0, 0, 0, 111, 112, 7, 1, 0, 0, 112, 113, 7, 0,
0, 0, 113, 114, 7, 1, 0, 0, 114, 115, 7, 2, 0, 0, 115, 116, 7, 3, 0, 0,
116, 28, 1, 0, 0, 0, 117, 118, 7, 4, 0, 0, 118, 119, 7, 3, 0, 0, 119, 120,
7, 5, 0, 0, 120, 121, 7, 6, 0, 0, 121, 122, 7, 3, 0, 0, 122, 123, 7, 3,
0, 0, 123, 124, 7, 7, 0, 0, 124, 30, 1, 0, 0, 0, 125, 126, 7, 3, 0, 0,
126, 127, 7, 8, 0, 0, 127, 128, 7, 1, 0, 0, 128, 129, 7, 9, 0, 0, 129,
131, 7, 5, 0, 0, 130, 132, 7, 9, 0, 0, 131, 130, 1, 0, 0, 0, 131, 132,
1, 0, 0, 0, 132, 32, 1, 0, 0, 0, 133, 134, 7, 10, 0, 0, 134, 135, 7, 3,
0, 0, 135, 136, 7, 11, 0, 0, 136, 137, 7, 3, 0, 0, 137, 138, 7, 8, 0, 0,
138, 139, 7, 12, 0, 0, 139, 34, 1, 0, 0, 0, 140, 141, 7, 13, 0, 0, 141,
142, 7, 14, 0, 0, 142, 143, 7, 7, 0, 0, 143, 144, 7, 5, 0, 0, 144, 145,
7, 15, 0, 0, 145, 146, 7, 1, 0, 0, 146, 148, 7, 7, 0, 0, 147, 149, 7, 9,
0, 0, 148, 147, 1, 0, 0, 0, 148, 149, 1, 0, 0, 0, 149, 36, 1, 0, 0, 0,
150, 151, 7, 1, 0, 0, 151, 152, 7, 7, 0, 0, 152, 38, 1, 0, 0, 0, 153, 154,
7, 7, 0, 0, 154, 155, 7, 14, 0, 0, 155, 156, 7, 5, 0, 0, 156, 40, 1, 0,
0, 0, 157, 158, 7, 15, 0, 0, 158, 159, 7, 7, 0, 0, 159, 160, 7, 16, 0,
0, 160, 42, 1, 0, 0, 0, 161, 162, 7, 14, 0, 0, 162, 163, 7, 10, 0, 0, 163,
44, 1, 0, 0, 0, 164, 165, 7, 17, 0, 0, 165, 166, 7, 15, 0, 0, 166, 167,
7, 9, 0, 0, 167, 168, 7, 5, 0, 0, 168, 169, 7, 14, 0, 0, 169, 170, 7, 2,
0, 0, 170, 171, 7, 3, 0, 0, 171, 172, 7, 7, 0, 0, 172, 46, 1, 0, 0, 0,
173, 174, 7, 17, 0, 0, 174, 175, 7, 15, 0, 0, 175, 176, 7, 9, 0, 0, 176,
48, 1, 0, 0, 0, 177, 178, 7, 17, 0, 0, 178, 179, 7, 15, 0, 0, 179, 180,
7, 9, 0, 0, 180, 181, 7, 15, 0, 0, 181, 182, 7, 7, 0, 0, 182, 183, 7, 18,
0, 0, 183, 50, 1, 0, 0, 0, 184, 185, 7, 17, 0, 0, 185, 186, 7, 15, 0, 0,
186, 187, 7, 9, 0, 0, 187, 188, 7, 15, 0, 0, 188, 189, 7, 0, 0, 0, 189,
190, 7, 0, 0, 0, 190, 52, 1, 0, 0, 0, 191, 192, 7, 5, 0, 0, 192, 193, 7,
10, 0, 0, 193, 194, 7, 19, 0, 0, 194, 201, 7, 3, 0, 0, 195, 196, 7, 20,
0, 0, 196, 197, 7, 15, 0, 0, 197, 198, 7, 0, 0, 0, 198, 199, 7, 9, 0, 0,
199, 201, 7, 3, 0, 0, 200, 191, 1, 0, 0, 0, 200, 195, 1, 0, 0, 0, 201,
54, 1, 0, 0, 0, 202, 203, 7, 21, 0, 0, 203, 56, 1, 0, 0, 0, 204, 206, 3,
55, 27, 0, 205, 204, 1, 0, 0, 0, 205, 206, 1, 0, 0, 0, 206, 208, 1, 0,
0, 0, 207, 209, 3, 71, 35, 0, 208, 207, 1, 0, 0, 0, 209, 210, 1, 0, 0,
0, 210, 208, 1, 0, 0, 0, 210, 211, 1, 0, 0, 0, 211, 219, 1, 0, 0, 0, 212,
216, 5, 46, 0, 0, 213, 215, 3, 71, 35, 0, 214, 213, 1, 0, 0, 0, 215, 218,
1, 0, 0, 0, 216, 214, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 220, 1, 0,
0, 0, 218, 216, 1, 0, 0, 0, 219, 212, 1, 0, 0, 0, 219, 220, 1, 0, 0, 0,
220, 230, 1, 0, 0, 0, 221, 223, 7, 3, 0, 0, 222, 224, 3, 55, 27, 0, 223,
222, 1, 0, 0, 0, 223, 224, 1, 0, 0, 0, 224, 226, 1, 0, 0, 0, 225, 227,
3, 71, 35, 0, 226, 225, 1, 0, 0, 0, 227, 228, 1, 0, 0, 0, 228, 226, 1,
0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 231, 1, 0, 0, 0, 230, 221, 1, 0, 0,
0, 230, 231, 1, 0, 0, 0, 231, 253, 1, 0, 0, 0, 232, 234, 3, 55, 27, 0,
233, 232, 1, 0, 0, 0, 233, 234, 1, 0, 0, 0, 234, 235, 1, 0, 0, 0, 235,
237, 5, 46, 0, 0, 236, 238, 3, 71, 35, 0, 237, 236, 1, 0, 0, 0, 238, 239,
1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 250, 1, 0,
0, 0, 241, 243, 7, 3, 0, 0, 242, 244, 3, 55, 27, 0, 243, 242, 1, 0, 0,
0, 243, 244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 247, 3, 71, 35, 0,
246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248,
249, 1, 0, 0, 0, 249, 251, 1, 0, 0, 0, 250, 241, 1, 0, 0, 0, 250, 251,
1, 0, 0, 0, 251, 253, 1, 0, 0, 0, 252, 205, 1, 0, 0, 0, 252, 233, 1, 0,
0, 0, 253, 58, 1, 0, 0, 0, 254, 260, 5, 34, 0, 0, 255, 259, 8, 22, 0, 0,
256, 257, 5, 92, 0, 0, 257, 259, 9, 0, 0, 0, 258, 255, 1, 0, 0, 0, 258,
256, 1, 0, 0, 0, 259, 262, 1, 0, 0, 0, 260, 258, 1, 0, 0, 0, 260, 261,
1, 0, 0, 0, 261, 263, 1, 0, 0, 0, 262, 260, 1, 0, 0, 0, 263, 275, 5, 34,
0, 0, 264, 270, 5, 39, 0, 0, 265, 269, 8, 23, 0, 0, 266, 267, 5, 92, 0,
0, 267, 269, 9, 0, 0, 0, 268, 265, 1, 0, 0, 0, 268, 266, 1, 0, 0, 0, 269,
272, 1, 0, 0, 0, 270, 268, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 273,
1, 0, 0, 0, 272, 270, 1, 0, 0, 0, 273, 275, 5, 39, 0, 0, 274, 254, 1, 0,
0, 0, 274, 264, 1, 0, 0, 0, 275, 60, 1, 0, 0, 0, 276, 280, 7, 24, 0, 0,
277, 279, 7, 25, 0, 0, 278, 277, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, 280,
278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 62, 1, 0, 0, 0, 282, 280, 1,
0, 0, 0, 283, 284, 5, 91, 0, 0, 284, 285, 5, 93, 0, 0, 285, 64, 1, 0, 0,
0, 286, 287, 5, 91, 0, 0, 287, 288, 5, 42, 0, 0, 288, 289, 5, 93, 0, 0,
289, 66, 1, 0, 0, 0, 290, 303, 3, 61, 30, 0, 291, 292, 5, 46, 0, 0, 292,
302, 3, 61, 30, 0, 293, 302, 3, 63, 31, 0, 294, 302, 3, 65, 32, 0, 295,
297, 5, 46, 0, 0, 296, 298, 3, 71, 35, 0, 297, 296, 1, 0, 0, 0, 298, 299,
1, 0, 0, 0, 299, 297, 1, 0, 0, 0, 299, 300, 1, 0, 0, 0, 300, 302, 1, 0,
0, 0, 301, 291, 1, 0, 0, 0, 301, 293, 1, 0, 0, 0, 301, 294, 1, 0, 0, 0,
301, 295, 1, 0, 0, 0, 302, 305, 1, 0, 0, 0, 303, 301, 1, 0, 0, 0, 303,
304, 1, 0, 0, 0, 304, 68, 1, 0, 0, 0, 305, 303, 1, 0, 0, 0, 306, 308, 7,
26, 0, 0, 307, 306, 1, 0, 0, 0, 308, 309, 1, 0, 0, 0, 309, 307, 1, 0, 0,
0, 309, 310, 1, 0, 0, 0, 310, 311, 1, 0, 0, 0, 311, 312, 6, 34, 0, 0, 312,
70, 1, 0, 0, 0, 313, 314, 7, 27, 0, 0, 314, 72, 1, 0, 0, 0, 315, 317, 8,
28, 0, 0, 316, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 316, 1, 0, 0,
0, 318, 319, 1, 0, 0, 0, 319, 74, 1, 0, 0, 0, 29, 0, 88, 131, 148, 200,
205, 210, 216, 219, 223, 228, 230, 233, 239, 243, 248, 250, 252, 258, 260,
268, 270, 274, 280, 299, 301, 303, 309, 318, 1, 6, 0, 0,
}
deserializer := antlr.NewATNDeserializer(nil)
staticData.atn = deserializer.Deserialize(staticData.serializedATN)
@@ -284,11 +281,10 @@ const (
FilterQueryLexerHAS = 24
FilterQueryLexerHASANY = 25
FilterQueryLexerHASALL = 26
FilterQueryLexerSEARCH = 27
FilterQueryLexerBOOL = 28
FilterQueryLexerNUMBER = 29
FilterQueryLexerQUOTED_TEXT = 30
FilterQueryLexerKEY = 31
FilterQueryLexerWS = 32
FilterQueryLexerFREETEXT = 33
FilterQueryLexerBOOL = 27
FilterQueryLexerNUMBER = 28
FilterQueryLexerQUOTED_TEXT = 29
FilterQueryLexerKEY = 30
FilterQueryLexerWS = 31
FilterQueryLexerFREETEXT = 32
)

View File

@@ -40,7 +40,7 @@ func filterqueryParserInit() {
"", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS",
"REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY",
"HASALL", "SEARCH", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
"HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
}
staticData.RuleNames = []string{
"query", "expression", "orExpression", "andExpression", "unaryExpression",
@@ -50,7 +50,7 @@ func filterqueryParserInit() {
}
staticData.PredictionContextCache = antlr.NewPredictionContextCache()
staticData.serializedATN = []int32{
4, 1, 33, 219, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
4, 1, 32, 219, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
2, 16, 7, 16, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 5, 2, 43,
@@ -71,8 +71,8 @@ func filterqueryParserInit() {
8, 12, 10, 12, 12, 12, 204, 9, 12, 1, 13, 1, 13, 1, 13, 3, 13, 209, 8,
13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 0, 0,
17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 0, 5,
1, 0, 7, 8, 1, 0, 13, 14, 2, 0, 30, 30, 33, 33, 1, 0, 23, 27, 1, 0, 28,
31, 235, 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 39, 1, 0, 0, 0, 6, 47,
1, 0, 7, 8, 1, 0, 13, 14, 2, 0, 29, 29, 32, 32, 1, 0, 23, 26, 1, 0, 27,
30, 235, 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 39, 1, 0, 0, 0, 6, 47,
1, 0, 0, 0, 8, 57, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 149, 1, 0, 0, 0,
14, 163, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190,
1, 0, 0, 0, 22, 192, 1, 0, 0, 0, 24, 197, 1, 0, 0, 0, 26, 208, 1, 0, 0,
@@ -142,7 +142,7 @@ func filterqueryParserInit() {
14, 0, 208, 205, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 207, 1, 0, 0, 0,
209, 27, 1, 0, 0, 0, 210, 211, 5, 3, 0, 0, 211, 212, 3, 18, 9, 0, 212,
213, 5, 4, 0, 0, 213, 29, 1, 0, 0, 0, 214, 215, 7, 4, 0, 0, 215, 31, 1,
0, 0, 0, 216, 217, 5, 31, 0, 0, 217, 33, 1, 0, 0, 0, 11, 44, 51, 53, 57,
0, 0, 0, 216, 217, 5, 30, 0, 0, 217, 33, 1, 0, 0, 0, 11, 44, 51, 53, 57,
70, 149, 163, 180, 187, 202, 208,
}
deserializer := antlr.NewATNDeserializer(nil)
@@ -208,13 +208,12 @@ const (
FilterQueryParserHAS = 24
FilterQueryParserHASANY = 25
FilterQueryParserHASALL = 26
FilterQueryParserSEARCH = 27
FilterQueryParserBOOL = 28
FilterQueryParserNUMBER = 29
FilterQueryParserQUOTED_TEXT = 30
FilterQueryParserKEY = 31
FilterQueryParserWS = 32
FilterQueryParserFREETEXT = 33
FilterQueryParserBOOL = 27
FilterQueryParserNUMBER = 28
FilterQueryParserQUOTED_TEXT = 29
FilterQueryParserKEY = 30
FilterQueryParserWS = 31
FilterQueryParserFREETEXT = 32
)
// FilterQueryParser rules.
@@ -804,7 +803,7 @@ func (p *FilterQueryParser) AndExpression() (localctx IAndExpressionContext) {
}
_la = p.GetTokenStream().LA(1)
for (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&12879659010) != 0 {
for (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&6437208066) != 0 {
p.SetState(51)
p.GetErrorHandler().Sync(p)
if p.HasError() {
@@ -826,7 +825,7 @@ func (p *FilterQueryParser) AndExpression() (localctx IAndExpressionContext) {
p.UnaryExpression()
}
case FilterQueryParserLPAREN, FilterQueryParserNOT, FilterQueryParserHASTOKEN, FilterQueryParserHAS, FilterQueryParserHASANY, FilterQueryParserHASALL, FilterQueryParserSEARCH, FilterQueryParserBOOL, FilterQueryParserNUMBER, FilterQueryParserQUOTED_TEXT, FilterQueryParserKEY, FilterQueryParserFREETEXT:
case FilterQueryParserLPAREN, FilterQueryParserNOT, FilterQueryParserHASTOKEN, FilterQueryParserHAS, FilterQueryParserHASANY, FilterQueryParserHASALL, FilterQueryParserBOOL, FilterQueryParserNUMBER, FilterQueryParserQUOTED_TEXT, FilterQueryParserKEY, FilterQueryParserFREETEXT:
{
p.SetState(50)
p.UnaryExpression()
@@ -2749,7 +2748,6 @@ type IFunctionCallContext interface {
HAS() antlr.TerminalNode
HASANY() antlr.TerminalNode
HASALL() antlr.TerminalNode
SEARCH() antlr.TerminalNode
// IsFunctionCallContext differentiates from other interfaces.
IsFunctionCallContext()
@@ -2827,10 +2825,6 @@ func (s *FunctionCallContext) HASALL() antlr.TerminalNode {
return s.GetToken(FilterQueryParserHASALL, 0)
}
func (s *FunctionCallContext) SEARCH() antlr.TerminalNode {
return s.GetToken(FilterQueryParserSEARCH, 0)
}
func (s *FunctionCallContext) GetRuleContext() antlr.RuleContext {
return s
}
@@ -2871,7 +2865,7 @@ func (p *FilterQueryParser) FunctionCall() (localctx IFunctionCallContext) {
p.SetState(192)
_la = p.GetTokenStream().LA(1)
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&260046848) != 0) {
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&125829120) != 0) {
p.GetErrorHandler().RecoverInline(p)
} else {
p.GetErrorHandler().ReportMatch(p)
@@ -3511,7 +3505,7 @@ func (p *FilterQueryParser) Value() (localctx IValueContext) {
p.SetState(214)
_la = p.GetTokenStream().LA(1)
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&4026531840) != 0) {
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&2013265920) != 0) {
p.GetErrorHandler().RecoverInline(p)
} else {
p.GetErrorHandler().ReportMatch(p)

View File

@@ -1343,7 +1343,7 @@ func getLocalTableName(tableName string) string {
}
func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalLogs.StringValue(),
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -1378,7 +1378,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -1437,7 +1437,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1463,7 +1463,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1483,7 +1483,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1498,7 +1498,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
Set("status = ?", constants.StatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1508,10 +1508,10 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
}
}(ttlPayload)
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
}
func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalTraces.StringValue(),
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -1541,7 +1541,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -1575,7 +1575,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1613,7 +1613,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1634,7 +1634,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1649,7 +1649,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
Set("status = ?", constants.StatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1658,7 +1658,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
}
}(distributedTableName)
}
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
}
func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool, error) {
@@ -1687,7 +1687,7 @@ func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool,
return true, nil
}
func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *retentiontypes.CustomRetentionTTLParams) (*retentiontypes.CustomRetentionTTLResponse, error) {
func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *model.CustomRetentionTTLParams) (*model.CustomRetentionTTLResponse, error) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalLogs.StringValue(),
@@ -1702,7 +1702,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
if !hasCustomRetention {
r.logger.Info("Custom retention not supported, falling back to standard TTL method", "orgID", orgID)
ttlParams := &retentiontypes.TTLParams{
ttlParams := &model.TTLParams{
Type: params.Type,
DelDuration: int64(params.DefaultTTLDays * 24 * 3600),
}
@@ -1723,7 +1723,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
return nil, errorsV2.Wrapf(apiErr.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to set standard TTL")
}
return &retentiontypes.CustomRetentionTTLResponse{
return &model.CustomRetentionTTLResponse{
Message: fmt.Sprintf("Custom retention not supported, applied standard TTL of %d days. %s", params.DefaultTTLDays, ttlResult.Message),
}, nil
}
@@ -1734,7 +1734,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
uuidWithHyphen := valuer.GenerateUUID()
uuid := strings.Replace(uuidWithHyphen.String(), "-", "", -1)
if params.Type != retentiontypes.LogsTTL {
if params.Type != constants.LogsTTL {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL only supported for logs")
}
@@ -1765,7 +1765,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
if apiErr != nil {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL is already running")
}
}
@@ -1851,7 +1851,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
TableName: tableName,
TTL: params.DefaultTTLDays,
Condition: string(ttlConditionsJSON),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1867,7 +1867,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusFailed)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
}
}
@@ -1876,21 +1876,21 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
r.logger.Debug("Executing custom retention TTL request: ", "request", query, "step", i+1)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting custom retention ttl", errorsV2.Attr(err))
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusFailed)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
}
}
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusSuccess)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusSuccess)
}
return &retentiontypes.CustomRetentionTTLResponse{
return &model.CustomRetentionTTLResponse{
Message: "custom retention TTL has been successfully set up",
}, nil
}
// New method to build multiIf expressions with support for multiple AND conditions
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []retentiontypes.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []model.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
var conditions []string
for i, rule := range ttlConditions {
@@ -1962,7 +1962,7 @@ func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []retentiontypes
return result
}
func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID string) (*retentiontypes.GetCustomRetentionTTLResponse, error) {
func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID string) (*model.GetCustomRetentionTTLResponse, error) {
// Check if V2 (custom retention) is supported
hasCustomRetention, err := r.hasCustomRetentionColumn(ctx)
if err != nil {
@@ -1971,7 +1971,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
hasCustomRetention = false
}
response := &retentiontypes.GetCustomRetentionTTLResponse{}
response := &model.GetCustomRetentionTTLResponse{}
if hasCustomRetention {
// V2 - Custom retention is supported
@@ -1994,19 +1994,19 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
if err == sql.ErrNoRows {
// No V2 configuration found, return defaults
response.DefaultTTLDays = retentiontypes.DefaultLogsRetentionDays
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
response.Status = retentiontypes.TTLSettingStatusSuccess
response.DefaultTTLDays = 15
response.TTLConditions = []model.CustomRetentionRule{}
response.Status = constants.StatusSuccess
response.ColdStorageTTLDays = -1
return response, nil
}
// Parse TTL conditions from Condition
var ttlConditions []retentiontypes.CustomRetentionRule
var ttlConditions []model.CustomRetentionRule
if customTTL.Condition != "" {
if err := json.Unmarshal([]byte(customTTL.Condition), &ttlConditions); err != nil {
r.logger.Error("Error parsing TTL conditions", errorsV2.Attr(err))
ttlConditions = []retentiontypes.CustomRetentionRule{}
ttlConditions = []model.CustomRetentionRule{}
}
}
@@ -2020,8 +2020,8 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
response.Version = "v1"
// Get V1 TTL configuration
ttlParams := &retentiontypes.GetTTLParams{
Type: retentiontypes.LogsTTL,
ttlParams := &model.GetTTLParams{
Type: constants.LogsTTL,
}
ttlResult, apiErr := r.GetTTL(ctx, orgID, ttlParams)
@@ -2041,7 +2041,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
}
// For V1, we don't have TTL conditions
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
response.TTLConditions = []model.CustomRetentionRule{}
}
return response, nil
@@ -2081,7 +2081,7 @@ func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, o
}
// Enhanced validation function with duplicate detection and efficient key validation
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []retentiontypes.CustomRetentionRule) error {
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []model.CustomRetentionRule) error {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.CodeNamespace: "clickhouse-reader",
instrumentationtypes.CodeFunctionName: "validateTTLConditions",
@@ -2185,16 +2185,16 @@ func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditi
// SetTTL sets the TTL for traces or metrics or logs tables.
// This is an async API which creates goroutines to set TTL.
// Status of TTL update is tracked with ttl_status table in sqlite db.
func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
// Keep only latest 100 transactions/requests
r.deleteTtlTransactions(ctx, orgID, 100)
switch params.Type {
case retentiontypes.TraceTTL:
case constants.TraceTTL:
return r.setTTLTraces(ctx, orgID, params)
case retentiontypes.MetricsTTL:
case constants.MetricsTTL:
return r.setTTLMetrics(ctx, orgID, params)
case retentiontypes.LogsTTL:
case constants.LogsTTL:
return r.setTTLLogs(ctx, orgID, params)
default:
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting ttl. ttl type should be <metrics|traces>, got %v", params.Type)}
@@ -2202,7 +2202,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *ret
}
func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalMetrics.StringValue(),
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -2231,7 +2231,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -2247,7 +2247,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -2285,7 +2285,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2306,7 +2306,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2321,7 +2321,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
Set("status = ?", constants.StatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2332,7 +2332,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
for _, tableName := range tableNames {
go metricTTL(tableName)
}
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
}
func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID string, numberOfTransactionsStore int) {
@@ -2389,7 +2389,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
// getTTLQueryStatus fetches ttl_status table status from DB
func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string, tableNameArray []string) (string, *model.ApiError) {
failFlag := false
status := retentiontypes.TTLSettingStatusSuccess
status := constants.StatusSuccess
for _, tableName := range tableNameArray {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
emptyStatusStruct := new(retentiontypes.TTLSetting)
@@ -2399,16 +2399,16 @@ func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string,
if apiErr != nil {
return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
status = retentiontypes.TTLSettingStatusPending
if statusItem.Status == constants.StatusPending && statusItem.UpdatedAt.Unix()-time.Now().Unix() < 3600 {
status = constants.StatusPending
return status, nil
}
if statusItem.Status == retentiontypes.TTLSettingStatusFailed {
if statusItem.Status == constants.StatusFailed {
failFlag = true
}
}
if failFlag {
status = retentiontypes.TTLSettingStatusFailed
status = constants.StatusFailed
}
return status, nil
@@ -2461,7 +2461,7 @@ func getLocalTableNameArray(tableNames []string) []string {
}
// GetTTL returns current ttl, expected ttl and past setTTL status for metrics/traces.
func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.GetTTLParams) (*retentiontypes.GetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -2496,8 +2496,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
return delTTL, moveTTL
}
getMetricsTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
var dbResp []retentiontypes.DBResponseTTL
getMetricsTTL := func() (*model.DBResponseTTL, *model.ApiError) {
var dbResp []model.DBResponseTTL
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v'", signozSampleLocalTableName)
@@ -2514,8 +2514,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
}
getTracesTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
var dbResp []retentiontypes.DBResponseTTL
getTracesTTL := func() (*model.DBResponseTTL, *model.ApiError) {
var dbResp []model.DBResponseTTL
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.traceLocalTableName, signozTraceDBName)
@@ -2532,8 +2532,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
}
getLogsTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
var dbResp []retentiontypes.DBResponseTTL
getLogsTTL := func() (*model.DBResponseTTL, *model.ApiError) {
var dbResp []model.DBResponseTTL
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.logsLocalTableName, r.logsDB)
@@ -2551,7 +2551,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
switch ttlParams.Type {
case retentiontypes.TraceTTL:
case constants.TraceTTL:
tableNameArray := []string{
r.TraceDB + "." + r.traceTableName,
r.TraceDB + "." + r.traceResourceTableV3,
@@ -2579,9 +2579,9 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
return &retentiontypes.GetTTLResponseItem{TracesTime: delTTL, TracesMoveTime: moveTTL, ExpectedTracesTime: ttlQuery.TTL, ExpectedTracesMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
return &model.GetTTLResponseItem{TracesTime: delTTL, TracesMoveTime: moveTTL, ExpectedTracesTime: ttlQuery.TTL, ExpectedTracesMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
case retentiontypes.MetricsTTL:
case constants.MetricsTTL:
tableNameArray := []string{signozMetricDBName + "." + signozSampleTableName}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
@@ -2602,9 +2602,9 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
return &retentiontypes.GetTTLResponseItem{MetricsTime: delTTL, MetricsMoveTime: moveTTL, ExpectedMetricsTime: ttlQuery.TTL, ExpectedMetricsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
return &model.GetTTLResponseItem{MetricsTime: delTTL, MetricsMoveTime: moveTTL, ExpectedMetricsTime: ttlQuery.TTL, ExpectedMetricsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
case retentiontypes.LogsTTL:
case constants.LogsTTL:
tableNameArray := []string{r.logsDB + "." + r.logsTableName}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
@@ -2625,7 +2625,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
return &retentiontypes.GetTTLResponseItem{LogsTime: delTTL, LogsMoveTime: moveTTL, ExpectedLogsTime: ttlQuery.TTL, ExpectedLogsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
return &model.GetTTLResponseItem{LogsTime: delTTL, LogsMoveTime: moveTTL, ExpectedLogsTime: ttlQuery.TTL, ExpectedLogsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
default:
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. ttl type should be metrics|traces, got %v",

View File

@@ -34,7 +34,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
@@ -1678,7 +1677,7 @@ func (aH *APIHandler) setCustomRetentionTTL(w http.ResponseWriter, r *http.Reque
return
}
var params retentiontypes.CustomRetentionTTLParams
var params model.CustomRetentionTTLParams
if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "Invalid data"))
return

View File

@@ -40,7 +40,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/utils"
querytemplate "github.com/SigNoz/signoz/pkg/query-service/utils/queryTemplate"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
chVariables "github.com/SigNoz/signoz/pkg/variables/clickhouse"
)
@@ -420,7 +419,7 @@ func parseTime(param string, r *http.Request) (*time.Time, error) {
}
func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
func parseTTLParams(r *http.Request) (*model.TTLParams, error) {
// make sure either of the query params are present
typeTTL := r.URL.Query().Get("type")
@@ -433,7 +432,7 @@ func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
}
// Validate the type parameter
if typeTTL != retentiontypes.TraceTTL && typeTTL != retentiontypes.MetricsTTL && typeTTL != retentiontypes.LogsTTL {
if typeTTL != baseconstants.TraceTTL && typeTTL != baseconstants.MetricsTTL && typeTTL != baseconstants.LogsTTL {
return nil, fmt.Errorf("type param should be metrics|traces|logs, got %v", typeTTL)
}
@@ -456,7 +455,7 @@ func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
}
}
return &retentiontypes.TTLParams{
return &model.TTLParams{
Type: typeTTL,
DelDuration: int64(durationParsed.Seconds()),
ColdStorageVolume: coldStorage,
@@ -464,7 +463,7 @@ func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
}, nil
}
func parseGetTTL(r *http.Request) (*retentiontypes.GetTTLParams, error) {
func parseGetTTL(r *http.Request) (*model.GetTTLParams, error) {
typeTTL := r.URL.Query().Get("type")
@@ -472,12 +471,12 @@ func parseGetTTL(r *http.Request) (*retentiontypes.GetTTLParams, error) {
return nil, fmt.Errorf("type param cannot be empty from the query")
} else {
// Validate the type parameter
if typeTTL != retentiontypes.TraceTTL && typeTTL != retentiontypes.MetricsTTL && typeTTL != retentiontypes.LogsTTL {
if typeTTL != baseconstants.TraceTTL && typeTTL != baseconstants.MetricsTTL && typeTTL != baseconstants.LogsTTL {
return nil, fmt.Errorf("type param should be metrics|traces|logs, got %v", typeTTL)
}
}
return &retentiontypes.GetTTLParams{Type: typeTTL}, nil
return &model.GetTTLParams{Type: typeTTL}, nil
}
func parseAggregateAttributeRequest(r *http.Request) (*v3.AggregateAttributeRequest, error) {

View File

@@ -19,6 +19,10 @@ const (
const MaxAllowedPointsInTimeSeries = 300
const TraceTTL = "traces"
const MetricsTTL = "metrics"
const LogsTTL = "logs"
const SpanSearchScopeRoot = "isroot"
const SpanSearchScopeEntryPoint = "isentrypoint"
const OrderBySpanCount = "span_count"

View File

@@ -7,7 +7,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
@@ -24,8 +23,8 @@ type Reader interface {
GetServicesList(ctx context.Context) (*[]string, error)
GetDependencyGraph(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
GetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.GetTTLParams) (*retentiontypes.GetTTLResponseItem, *model.ApiError)
GetCustomRetentionTTL(ctx context.Context, orgID string) (*retentiontypes.GetCustomRetentionTTLResponse, error)
GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError)
GetCustomRetentionTTL(ctx context.Context, orgID string) (*model.GetCustomRetentionTTLResponse, error)
// GetDisks returns a list of disks configured in the underlying DB. It is supported by
// clickhouse only.
@@ -47,8 +46,8 @@ type Reader interface {
GetFlamegraphSpansForTrace(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, error)
// Setter Interfaces
SetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError)
SetTTLV2(ctx context.Context, orgID string, params *retentiontypes.CustomRetentionTTLParams) (*retentiontypes.CustomRetentionTTLResponse, error)
SetTTL(ctx context.Context, orgID string, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError)
SetTTLV2(ctx context.Context, orgID string, params *model.CustomRetentionTTLParams) (*model.CustomRetentionTTLResponse, error)
FetchTemporality(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]map[v3.Temporality]bool, error)
GetMetricAggregateAttributes(ctx context.Context, orgID valuer.UUID, req *v3.AggregateAttributeRequest, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error)

View File

@@ -404,6 +404,56 @@ type TagKey struct {
Type TagDataType `json:"type"`
}
// TTLParams holds a v1 set-TTL request: the signal type plus the deletion
// and cold-storage move durations, both expressed in seconds.
type TTLParams struct {
Type string // It can be one of {traces, metrics}.
ColdStorageVolume string // Name of the cold storage volume.
ToColdStorageDuration int64 // Seconds after which data will be moved to cold storage.
DelDuration int64 // Seconds after which data will be deleted.
}
// CustomRetentionTTLParams is the v2 (custom retention) set-TTL request:
// a default TTL in days plus per-condition overrides, with optional cold
// storage settings.
type CustomRetentionTTLParams struct {
Type string `json:"type"` // Signal type the retention applies to.
DefaultTTLDays int `json:"defaultTTLDays"` // NOTE(review): presumably the fallback TTL when no condition matches — confirm in setter.
TTLConditions []CustomRetentionRule `json:"ttlConditions"` // Per-filter TTL overrides.
ColdStorageVolume string `json:"coldStorageVolume,omitempty"` // Cold storage volume name, if any.
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"` // Days before data moves to cold storage.
}
// CustomRetentionRule pairs a set of filter conditions with the TTL, in
// days, applied to data matching them.
type CustomRetentionRule struct {
Filters []FilterCondition `json:"conditions"`
TTLDays int `json:"ttlDays"`
}
// FilterCondition matches a single field key against any of the listed
// values.
type FilterCondition struct {
Key string `json:"key"`
Values []string `json:"values"`
}
// GetCustomRetentionTTLResponse is the get-TTL response carrying both the
// v1 (hour-based) and v2 (custom retention) field sets.
// NOTE(review): Version presumably indicates which field set is populated —
// confirm against the reader implementation.
type GetCustomRetentionTTLResponse struct {
Version string `json:"version"`
Status string `json:"status"`
// V1 fields
// LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
// LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
// V2 fields
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
TTLConditions []CustomRetentionRule `json:"ttl_conditions,omitempty"`
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
}
// CustomRetentionTTLResponse is the acknowledgement returned for a v2
// set-TTL request.
type CustomRetentionTTLResponse struct {
Message string `json:"message"`
}
// GetTTLParams identifies which signal's TTL settings to fetch.
type GetTTLParams struct {
Type string // One of {traces, metrics, logs}; validated against TraceTTL/MetricsTTL/LogsTTL.
}
type ListErrorsParams struct {
StartStr string `json:"start"`
EndStr string `json:"end"`

View File

@@ -150,6 +150,16 @@ type RuleResponseItem struct {
Data string `json:"data" db:"data"`
}
// TTLStatusItem is a persisted record tracking a TTL change applied to a
// single table, including its current status.
type TTLStatusItem struct {
Id int `json:"id" db:"id"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
TableName string `json:"table_name" db:"table_name"` // Table the TTL change targets.
TTL int `json:"ttl" db:"ttl"` // Retention TTL being applied.
Status string `json:"status" db:"status"` // Progress/outcome of the TTL change.
ColdStorageTtl int `json:"cold_storage_ttl" db:"cold_storage_ttl"` // Cold storage TTL being applied.
}
type ChannelItem struct {
Id int `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
@@ -452,11 +462,35 @@ type SpanAggregatesDBResponseItem struct {
GroupBy string `ch:"groupBy"`
}
// SetTTLResponseItem is the acknowledgement returned for a v1 set-TTL
// request.
type SetTTLResponseItem struct {
Message string `json:"message"`
}
// DiskItem describes a disk configured in the underlying database, as read
// via the `ch` column tags.
type DiskItem struct {
Name string `json:"name,omitempty" ch:"name"`
Type string `json:"type,omitempty" ch:"type"`
}
// DBResponseTTL captures the engine_full column of a table-metadata query,
// from which TTL settings can be read.
type DBResponseTTL struct {
EngineFull string `ch:"engine_full"`
}
// GetTTLResponseItem reports TTL durations (in hours) per signal.
// NOTE(review): the Expected* fields presumably carry values from a TTL
// change that is still being applied — confirm with the setter flow.
type GetTTLResponseItem struct {
MetricsTime int `json:"metrics_ttl_duration_hrs,omitempty"`
MetricsMoveTime int `json:"metrics_move_ttl_duration_hrs,omitempty"`
TracesTime int `json:"traces_ttl_duration_hrs,omitempty"`
TracesMoveTime int `json:"traces_move_ttl_duration_hrs,omitempty"`
LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
ExpectedMetricsTime int `json:"expected_metrics_ttl_duration_hrs,omitempty"`
ExpectedMetricsMoveTime int `json:"expected_metrics_move_ttl_duration_hrs,omitempty"`
ExpectedTracesTime int `json:"expected_traces_ttl_duration_hrs,omitempty"`
ExpectedTracesMoveTime int `json:"expected_traces_move_ttl_duration_hrs,omitempty"`
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
Status string `json:"status"`
}
type DBResponseServiceName struct {
ServiceName string `ch:"serviceName"`
Count uint64 `ch:"count"`

View File

@@ -8,17 +8,6 @@ const (
// BodyFullTextSearchDefaultWarning is emitted when a full-text search or "body" searches are hit
// with New JSON Body enhancements.
BodyFullTextSearchDefaultWarning = "Full text searches default to `body.message:string`. Use `body.<key>` to search a different field inside body"
// FTSInternalKey is the sentinel Name on TelemetryFieldKey instances that represent
// wildcard map searches (all attribute/resource keys+values). The unconventional value
// prevents collision with any real field name a user could type.
FTSInternalKey = "_X_INTERNAL_FTS_KEY"
// SearchFunctionName is the grammar function name for full-text search.
SearchFunctionName = "search"
// FTSMaxWindowNs is the maximum allowed time range for a search() query (6 hours).
FTSMaxWindowNs = uint64(6 * 60 * 60 * 1_000_000_000)
)
var (

View File

@@ -46,7 +46,6 @@ type filterExpressionVisitor struct {
keysWithWarnings map[string]bool
startNs uint64
endNs uint64
ftsFieldKeys []*telemetrytypes.TelemetryFieldKey
}
type FilterExprVisitorOpts struct {
@@ -66,9 +65,6 @@ type FilterExprVisitorOpts struct {
Variables map[string]qbtypes.VariableItem
StartNs uint64
EndNs uint64
// FTSFieldKeys enables search() for this query context. nil disables search()
// (traces, metrics, and non-log callers leave this nil).
FTSFieldKeys []*telemetrytypes.TelemetryFieldKey
}
// newFilterExpressionVisitor creates a new filterExpressionVisitor.
@@ -91,7 +87,6 @@ func newFilterExpressionVisitor(opts FilterExprVisitorOpts) *filterExpressionVis
keysWithWarnings: make(map[string]bool),
startNs: opts.StartNs,
endNs: opts.EndNs,
ftsFieldKeys: opts.FTSFieldKeys,
}
}
@@ -339,9 +334,14 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
return SkipConditionLiteral
}
if v.fullTextColumn == nil {
v.errors = append(v.errors, "full text search is not supported")
return ErrorConditionLiteral
}
child := ctx.GetChild(0)
var searchText string
if keyCtx, ok := child.(*grammar.KeyContext); ok {
// create a full text search condition on the body field
searchText = keyCtx.GetText()
} else if valCtx, ok := child.(*grammar.ValueContext); ok {
if valCtx.QUOTED_TEXT() != nil {
@@ -357,15 +357,6 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
return ErrorConditionLiteral
}
}
if len(v.ftsFieldKeys) > 0 {
return v.runSearchFunction(searchText)
}
if v.fullTextColumn == nil {
v.errors = append(v.errors, "full text search is not supported")
return ErrorConditionLiteral
}
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.startNs, v.endNs, v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(searchText), v.builder)
if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
@@ -374,6 +365,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
if v.bodyJSONEnabled && v.fullTextColumn.Name == "body" {
v.warnings = append(v.warnings, BodyFullTextSearchDefaultWarning)
}
return cond
}
@@ -719,10 +711,6 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
text = ctx.FREETEXT().GetText()
}
if len(v.ftsFieldKeys) > 0 {
return v.runSearchFunction(text)
}
if v.fullTextColumn == nil {
v.errors = append(v.errors, "full text search is not supported")
return ErrorConditionLiteral
@@ -740,19 +728,12 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
return cond
}
// VisitFunctionCall handles function calls like has(), hasAny(), search(), etc.
// VisitFunctionCall handles function calls like has(), hasAny(), etc.
func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallContext) any {
if v.skipFunctionCalls {
return SkipConditionLiteral
}
// search() must be handled before visiting params: unquoted tokens like
// search(error) are parsed as a key context, and visiting them through VisitKey
// would append "key not found" errors before we can treat the text as a search string.
if ctx.SEARCH() != nil {
return v.visitSearchFunction(ctx)
}
// Get function name based on which token is present
var functionName string
if ctx.HAS() != nil {
@@ -861,63 +842,6 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
return v.builder.Or(conds...)
}
// runSearchFunction builds an OR of regex-match conditions for text across
// every configured FTS field key. It is shared by explicit search() calls
// and implicit bare-expression full-text search for logs, so the
// FTSMaxWindowNs (6-hour) time-window guard lives in exactly one place.
func (v *filterExpressionVisitor) runSearchFunction(text string) any {
	windowKnown := v.startNs > 0 && v.endNs > 0
	if windowKnown && v.endNs-v.startNs > FTSMaxWindowNs {
		v.errors = append(v.errors, "full text search is restricted to a maximum of 6-hour time window")
		return ErrorConditionLiteral
	}

	pattern := FormatFullTextSearch(text)
	conds := make([]string, 0, len(v.ftsFieldKeys))
	for _, fieldKey := range v.ftsFieldKeys {
		// A key that cannot produce a condition is skipped rather than
		// failing the whole search.
		cond, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, fieldKey, qbtypes.FilterOperatorRegexp, pattern, v.builder)
		if err != nil {
			continue
		}
		conds = append(conds, cond)
	}

	// No usable key at all means the search cannot be expressed.
	if len(conds) == 0 {
		return ErrorConditionLiteral
	}
	return v.builder.Or(conds...)
}
// visitSearchFunction handles the search() function call.
// search('value') or search(value) fans out a regex match across all FTS column keys.
// Returns ErrorConditionLiteral (and records a user-facing error) when
// search() is unsupported in this context or the parameter is missing or
// not a string-like value.
func (v *filterExpressionVisitor) visitSearchFunction(ctx *grammar.FunctionCallContext) any {
// ftsFieldKeys == nil means search() is not enabled for this signal/query type.
// Only log statement builders set FTSFieldKeys; traces/metrics leave it nil.
if len(v.ftsFieldKeys) == 0 {
v.errors = append(v.errors, "search() is only supported for log queries")
return ErrorConditionLiteral
}
// Extract the search text directly from the parse tree — bypass VisitKey so that
// unquoted tokens like search(error) don't trigger "key not found" errors.
paramCtxs := ctx.FunctionParamList().AllFunctionParam()
if len(paramCtxs) < 1 {
v.errors = append(v.errors, "search() requires a value parameter, e.g. search('error') or search(error)")
return ErrorConditionLiteral
}
// Only the first parameter is used as the search text; extras are ignored.
paramCtx := paramCtxs[0]
var searchText string
if paramCtx.Value() != nil {
// Quoted/literal value — visit it normally and stringify the result.
raw := v.Visit(paramCtx.Value())
searchText = fmt.Sprintf("%v", raw)
} else if paramCtx.Key() != nil {
// Unquoted word — use the raw token text, bypassing the key lookup.
searchText = paramCtx.Key().GetText()
} else {
v.errors = append(v.errors, "search() parameter must be a string value")
return ErrorConditionLiteral
}
return v.runSearchFunction(searchText)
}
// VisitFunctionParamList handles the parameter list for function calls.
func (v *filterExpressionVisitor) VisitFunctionParamList(ctx *grammar.FunctionParamListContext) any {
params := ctx.AllFunctionParam()

View File

@@ -23,7 +23,6 @@ import (
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
@@ -136,9 +135,6 @@ type Config struct {
// Auditor config
Auditor auditor.Config `mapstructure:"auditor"`
// MeterReporter config
MeterReporter meterreporter.Config `mapstructure:"meterreporter"`
// CloudIntegration config
CloudIntegration cloudintegration.Config `mapstructure:"cloudintegration"`
@@ -179,7 +175,6 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
identn.NewConfigFactory(),
serviceaccount.NewConfigFactory(),
auditor.NewConfigFactory(),
meterreporter.NewConfigFactory(),
cloudintegration.NewConfigFactory(),
tracedetail.NewConfigFactory(),
authz.NewConfigFactory(),

View File

@@ -16,7 +16,6 @@ import (
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/queryparser"
@@ -53,8 +52,7 @@ func TestNewHandlers(t *testing.T) {
userRoleStore := impluser.NewUserRoleStore(sqlstore, providerSettings)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), userRoleStore, flagger)
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, retentionGetter, flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, flagger)
querierHandler := querier.NewHandler(providerSettings, nil, nil)
registryHandler := factory.NewHandler(nil)

View File

@@ -29,7 +29,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/quickfilter/implquickfilter"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport/implrawdataexport"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory/implrulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/savedview"
@@ -64,7 +63,6 @@ type Modules struct {
Preference preference.Module
UserSetter user.Setter
UserGetter user.Getter
RetentionGetter retention.Getter
SavedView savedview.Module
Apdex apdex.Module
Dashboard dashboard.Module
@@ -105,7 +103,6 @@ func NewModules(
userRoleStore authtypes.UserRoleStore,
serviceAccount serviceaccount.Module,
cloudIntegrationModule cloudintegration.Module,
retentionGetter retention.Getter,
fl flagger.Flagger,
) Modules {
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
@@ -122,7 +119,6 @@ func NewModules(
Dashboard: dashboard,
UserSetter: userSetter,
UserGetter: userGetter,
RetentionGetter: retentionGetter,
QuickFilter: quickfilter,
TraceFunnel: impltracefunnel.NewModule(impltracefunnel.NewStore(sqlstore)),
RawDataExport: implrawdataexport.NewModule(querier),

View File

@@ -16,7 +16,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
@@ -57,8 +56,7 @@ func TestNewModules(t *testing.T) {
serviceAccount := implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), nil, nil, nil, providerSettings, serviceaccount.Config{})
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), retentionGetter, flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), flagger)
reflectVal := reflect.ValueOf(modules)
for i := 0; i < reflectVal.NumField(); i++ {

View File

@@ -28,8 +28,6 @@ import (
"github.com/SigNoz/signoz/pkg/identn/apikeyidentn"
"github.com/SigNoz/signoz/pkg/identn/impersonationidentn"
"github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/meterreporter/noopmeterreporter"
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
@@ -321,12 +319,6 @@ func NewAuditorProviderFactories() factory.NamedMap[factory.ProviderFactory[audi
)
}
// NewMeterReporterProviderFactories returns the named provider factories
// available for constructing a meterreporter.Reporter. Only the no-op
// implementation is registered here.
func NewMeterReporterProviderFactories() factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]] {
	noopFactory := noopmeterreporter.NewFactory()
	return factory.MustNewNamedMap(noopFactory)
}
func NewFlaggerProviderFactories(registry featuretypes.Registry) factory.NamedMap[factory.ProviderFactory[flagger.FlaggerProvider, flagger.Config]] {
return factory.MustNewNamedMap(
configflagger.NewFactory(registry),

View File

@@ -22,13 +22,10 @@ import (
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
@@ -87,7 +84,6 @@ type SigNoz struct {
Flagger flagger.Flagger
Gateway gateway.Gateway
Auditor auditor.Auditor
MeterReporter meterreporter.Reporter
}
func New(
@@ -108,7 +104,6 @@ func New(
dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing) dashboard.Module,
gatewayProviderFactory func(licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config],
auditorProviderFactories func(licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]],
meterReporterProviderFactories func(context.Context, factory.ProviderSettings, flagger.Flagger, licensing.Licensing, telemetrystore.TelemetryStore, retention.Getter, organization.Getter, zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string),
querierHandlerCallback func(factory.ProviderSettings, querier.Querier, analytics.Analytics) querier.Handler,
cloudIntegrationCallback func(sqlstore.SQLStore, global.Global, zeus.Zeus, gateway.Gateway, licensing.Licensing, serviceaccount.Module, cloudintegration.Config) (cloudintegration.Module, error),
rulerProviderFactories func(cache.Cache, alertmanager.Alertmanager, sqlstore.SQLStore, telemetrystore.TelemetryStore, telemetrytypes.MetadataStore, prometheus.Prometheus, organization.Getter, rulestatehistory.Module, querier.Querier, queryparser.QueryParser) factory.NamedMap[factory.ProviderFactory[ruler.Ruler, ruler.Config]],
@@ -233,8 +228,6 @@ func New(
return nil, err
}
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
// Initialize prometheus from the available prometheus provider factories
prometheus, err := factory.NewProviderFromNamedMap(
ctx,
@@ -393,13 +386,6 @@ func New(
return nil, err
}
// Initialize meter reporter from the variant-specific provider factories
meterReporterFactories, meterReporterProvider := meterReporterProviderFactories(ctx, providerSettings, flagger, licensing, telemetrystore, retentionGetter, orgGetter, zeus)
meterReporter, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.MeterReporter, meterReporterFactories, meterReporterProvider)
if err != nil {
return nil, err
}
// Initialize authns
store := sqlauthnstore.NewStore(sqlstore)
authNs, err := authNsCallback(ctx, providerSettings, store, licensing)
@@ -455,7 +441,7 @@ func New(
}
// Initialize all modules
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, retentionGetter, flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, flagger)
// Initialize ruler from the variant-specific provider factories
rulerInstance, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.Ruler, rulerProviderFactories(cache, alertmanager, sqlstore, telemetrystore, telemetryMetadataStore, prometheus, orgGetter, modules.RuleStateHistory, querier, queryParser), "signoz")
@@ -515,7 +501,6 @@ func New(
factory.NewNamedService(factory.MustNewName("authz"), authz),
factory.NewNamedService(factory.MustNewName("user"), userService, factory.MustNewName("authz")),
factory.NewNamedService(factory.MustNewName("auditor"), auditor),
factory.NewNamedService(factory.MustNewName("meterreporter"), meterReporter, factory.MustNewName("licensing")),
factory.NewNamedService(factory.MustNewName("ruler"), rulerInstance),
)
if err != nil {
@@ -565,6 +550,5 @@ func New(
Flagger: flagger,
Gateway: gateway,
Auditor: auditor,
MeterReporter: meterReporter,
}, nil
}

View File

@@ -25,17 +25,6 @@ func NewConditionBuilder(fm qbtypes.FieldMapper, fl flagger.Flagger) *conditionB
return &conditionBuilder{fm: fm, fl: fl}
}
// ftsMapExprs returns the mapKeys and mapValues expressions used for FTS
// matching on a map column. When the map's value type is not String, the
// values expression is wrapped in arrayMap(x -> toString(x), ...) so regex
// matching always operates on strings.
func ftsMapExprs(col *schema.Column) (keysExpr, valsExpr string) {
	name := col.Name
	keysExpr = fmt.Sprintf("mapKeys(%s)", name)

	mapType, isMap := col.Type.(schema.MapColumnType)
	if isMap && mapType.ValueType.GetType() != schema.ColumnTypeEnumString {
		// Non-string values must be stringified before match().
		valsExpr = fmt.Sprintf("arrayMap(x -> toString(x), mapValues(%s))", name)
		return keysExpr, valsExpr
	}

	valsExpr = fmt.Sprintf("mapValues(%s)", name)
	return keysExpr, valsExpr
}
func (c *conditionBuilder) conditionFor(
ctx context.Context,
startNs, endNs uint64,
@@ -129,24 +118,10 @@ func (c *conditionBuilder) conditionFor(
return sb.NotILike(fieldExpression, fmt.Sprintf("%%%s%%", value)), nil
case qbtypes.FilterOperatorRegexp:
if key.Name == querybuilder.FTSInternalKey {
rawVal := fmt.Sprintf("%v", value)
keysExpr, valsExpr := ftsMapExprs(columns[0])
keysCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), keysExpr)
valsCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), valsExpr)
return sb.Or(keysCond, valsCond), nil
}
// Note: Escape $$ to $$$$ to avoid sqlbuilder interpreting materialized $ signs
// Only needed because we are using sprintf instead of sb.Match (not implemented in sqlbuilder)
return fmt.Sprintf(`match(%s, %s)`, sqlbuilder.Escape(fieldExpression), sb.Var(value)), nil
case qbtypes.FilterOperatorNotRegexp:
if key.Name == querybuilder.FTSInternalKey {
rawVal := fmt.Sprintf("%v", value)
keysExpr, valsExpr := ftsMapExprs(columns[0])
keysCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), keysExpr)
valsCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), valsExpr)
return "NOT " + sb.Or(keysCond, valsCond), nil
}
// Note: Escape $$ to $$$$ to avoid sqlbuilder interpreting materialized $ signs
// Only needed because we are using sprintf instead of sb.Match (not implemented in sqlbuilder)
return fmt.Sprintf(`NOT match(%s, %s)`, sqlbuilder.Escape(fieldExpression), sb.Var(value)), nil
@@ -307,12 +282,6 @@ func (c *conditionBuilder) ConditionFor(
return "", err
}
// FTS wildcard conditions are self-contained (arrayExists over full map);
// no additional EXISTS wrapper is needed.
if key.Name == querybuilder.FTSInternalKey {
return condition, nil
}
// Skip adding exists filter for intrinsic fields i.e. Table level log context fields
buildExistCondition := operator.AddDefaultExistsFilter()
switch key.FieldContext {

View File

@@ -514,67 +514,6 @@ func TestConditionFor(t *testing.T) {
expectedSQL: "",
expectedError: qbtypes.ErrColumnNotFound,
},
// FTS wildcard (FTSInternalKey) cases
{
name: "FTS wildcard - attribute string REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
operator: qbtypes.FilterOperatorRegexp,
value: "error",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))",
expectedArgs: []any{"error", "error"},
},
{
name: "FTS wildcard - attribute number REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeNumber,
},
operator: qbtypes.FilterOperatorRegexp,
value: "42",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))",
expectedArgs: []any{"42", "42"},
},
{
name: "FTS wildcard - attribute bool REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeBool,
},
operator: qbtypes.FilterOperatorRegexp,
value: "true",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))",
expectedArgs: []any{"true", "true"},
},
{
name: "FTS wildcard - resource string REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
operator: qbtypes.FilterOperatorRegexp,
value: "my-service",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string))",
expectedArgs: []any{"my-service", "my-service"},
},
{
name: "FTS wildcard - NOT REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
operator: qbtypes.FilterOperatorNotRegexp,
value: "healthcheck",
expectedSQL: "NOT (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string)))",
expectedArgs: []any{"healthcheck", "healthcheck"},
},
}
fl := flaggertest.New(t)
fm := NewFieldMapper(fl)

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/SigNoz/signoz-otel-collector/constants"
"github.com/SigNoz/signoz/pkg/querybuilder"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -126,21 +125,6 @@ var (
Direction: qbtypes.OrderDirectionDesc,
},
}
// DefaultFTSFieldKeys is the ordered set of TelemetryFieldKey instances that
// search() fans out across. Intrinsic log columns use the normal conditionFor
// path; entries with Name==FTSInternalKey are short-circuited in conditionFor
// to emit arrayExists conditions over mapKeys/mapValues without arrayConcat.
DefaultFTSFieldKeys = []*telemetrytypes.TelemetryFieldKey{
{Name: LogsV2BodyColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: LogsV2SeverityTextColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: LogsV2TraceIDColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: LogsV2SpanIDColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeNumber},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeBool},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextResource, FieldDataType: telemetrytypes.FieldDataTypeString},
}
)
func bodyAliasExpression(bodyJSONEnabled bool) string {

View File

@@ -13,7 +13,6 @@ import (
"github.com/SigNoz/signoz-otel-collector/utils"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -80,9 +79,6 @@ func NewFieldMapper(fl flagger.Flagger) qbtypes.FieldMapper {
func (m *fieldMapper) getColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
switch key.FieldContext {
case telemetrytypes.FieldContextResource:
if key.Name == querybuilder.FTSInternalKey {
return []*schema.Column{logsV2Columns["resources_string"]}, nil
}
columns := []*schema.Column{logsV2Columns["resources_string"], logsV2Columns["resource"]}
return columns, nil
case telemetrytypes.FieldContextScope:

View File

@@ -1,137 +0,0 @@
package telemetrylogs
import (
"context"
"strings"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger/flaggertest"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/huandu/go-sqlbuilder"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSearchFunctionFTS(t *testing.T) {
fl := flaggertest.New(t)
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
ctx := context.Background()
fm := NewFieldMapper(fl)
cb := NewConditionBuilder(fm, fl)
keys := buildCompleteFieldKeyMap(releaseTime)
for _, field := range IntrinsicFields {
f := field
keys[field.Name] = append(keys[field.Name], &f)
}
startNs := uint64(releaseTime.Add(-1 * time.Hour).UnixNano())
endNs := uint64(releaseTime.Add(1 * time.Hour).UnixNano())
makeOpts := func(ftsKeys []*telemetrytypes.TelemetryFieldKey) querybuilder.FilterExprVisitorOpts {
return querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: instrumentationtest.New().Logger(),
FieldMapper: fm,
ConditionBuilder: cb,
FieldKeys: keys,
JsonKeyToKey: GetBodyJSONKey,
StartNs: startNs,
EndNs: endNs,
FTSFieldKeys: ftsKeys,
}
}
t.Run("search quoted string fans out to all columns", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("search('error')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
sql, _ := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
// Must touch all 8 targets: body, severity_text, trace_id, span_id, plus 4 map pairs
assert.Contains(t, sql, "match(LOWER(body), LOWER(?))")
assert.Contains(t, sql, "match(severity_text, ?)")
assert.Contains(t, sql, "match(trace_id, ?)")
assert.Contains(t, sql, "match(span_id, ?)")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(attributes_string))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapValues(attributes_string))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(attributes_number))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(attributes_bool))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(resources_string))")
})
t.Run("search unquoted token produces same result as quoted", func(t *testing.T) {
quoted, err := querybuilder.PrepareWhereClause("search('error')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
unquoted, err := querybuilder.PrepareWhereClause("search(error)", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
sqlQ, argsQ := quoted.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
sqlU, argsU := unquoted.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Equal(t, sqlQ, sqlU)
assert.Equal(t, argsQ, argsU)
})
t.Run("NOT search wraps entire condition", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("NOT search('healthcheck')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
sql, _ := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Contains(t, sql, "NOT (")
})
t.Run("search combined with other filter", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("search('error') AND severity_text = 'ERROR'", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
sql, _ := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Contains(t, sql, "match(LOWER(body), LOWER(?))")
assert.Contains(t, sql, "severity_text = ?")
})
t.Run("search invalid regex is escaped as literal", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("search('[ERROR-1234]')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
_, args := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
// FormatFullTextSearch escapes invalid regex — all args should be the escaped form
for _, arg := range args {
if s, ok := arg.(string); ok {
assert.Equal(t, `\[ERROR-1234\]`, s)
}
}
})
t.Run("search with FTSColumnKeys nil returns error", func(t *testing.T) {
_, err := querybuilder.PrepareWhereClause("search('error')", makeOpts(nil))
require.Error(t, err)
_, _, _, _, _, additionals := errors.Unwrapb(err)
found := false
for _, a := range additionals {
if strings.Contains(a, "search() is only supported for log queries") {
found = true
break
}
}
assert.True(t, found, "expected 'only supported for log queries' error, got: %v", additionals)
})
t.Run("search with window exceeding 6 hours returns error", func(t *testing.T) {
opts := makeOpts(DefaultFTSFieldKeys)
opts.StartNs = uint64(releaseTime.UnixNano())
opts.EndNs = uint64(releaseTime.Add(7 * time.Hour).UnixNano())
_, err := querybuilder.PrepareWhereClause("search('error')", opts)
require.Error(t, err)
_, _, _, _, _, additionals := errors.Unwrapb(err)
found := false
for _, a := range additionals {
if strings.Contains(a, "6-hour") {
found = true
break
}
}
assert.True(t, found, "expected 6-hour window error, got: %v", additionals)
})
}

View File

@@ -100,7 +100,7 @@ func TestFilterExprLogs(t *testing.T) {
category: "Single word",
query: "<script>alert('xss')</script>",
shouldPass: false,
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '<'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '<'",
},
// Single word searches with spaces
@@ -166,7 +166,7 @@ func TestFilterExprLogs(t *testing.T) {
category: "Special characters",
query: "[tracing]",
shouldPass: false,
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '['",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '['",
},
{
category: "Special characters",
@@ -196,7 +196,7 @@ func TestFilterExprLogs(t *testing.T) {
category: "Special characters",
query: "ERROR: cannot execute update() in a read-only context",
shouldPass: false,
expectedErrorContains: "expecting one of {(, AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got ')'",
expectedErrorContains: "expecting one of {(, AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got ')'",
},
{
category: "Special characters",
@@ -618,7 +618,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
},
{
category: "Keyword conflict",
@@ -626,7 +626,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
},
{
category: "Keyword conflict",
@@ -634,7 +634,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), FREETEXT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF",
expectedErrorContains: "expecting one of {(, ), FREETEXT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF",
},
{
category: "Keyword conflict",
@@ -642,7 +642,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'like'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'like'",
},
{
category: "Keyword conflict",
@@ -650,7 +650,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
},
{
category: "Keyword conflict",
@@ -658,7 +658,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
},
{
category: "Keyword conflict",
@@ -666,7 +666,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'exists'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'exists'",
},
{
category: "Keyword conflict",
@@ -674,7 +674,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'regexp'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'regexp'",
},
{
category: "Keyword conflict",
@@ -682,7 +682,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'contains'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'contains'",
},
{
category: "Keyword conflict",
@@ -2018,9 +2018,9 @@ func TestFilterExprLogs(t *testing.T) {
expectedErrorContains: "",
},
{category: "Only keywords", query: "AND", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'AND'"},
{category: "Only keywords", query: "OR", shouldPass: false, expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'OR'"},
{category: "Only keywords", query: "NOT", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF"},
{category: "Only keywords", query: "AND", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'AND'"},
{category: "Only keywords", query: "OR", shouldPass: false, expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'OR'"},
{category: "Only keywords", query: "NOT", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF"},
{category: "Only functions", query: "has", shouldPass: false, expectedErrorContains: "expecting one of {(, )} but got EOF"},
{category: "Only functions", query: "hasAny", shouldPass: false, expectedErrorContains: "expecting one of {(, )} but got EOF"},
@@ -2162,7 +2162,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
expectedErrorContains: "line 1:0 expecting one of {(, ), FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
},
{
category: "Operator keywords as keys",
@@ -2170,7 +2170,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
},
{
category: "Operator keywords as keys",
@@ -2178,7 +2178,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:3 expecting one of {(, ), FREETEXT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '='",
expectedErrorContains: "line 1:3 expecting one of {(, ), FREETEXT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '='",
},
{
category: "Operator keywords as keys",
@@ -2186,7 +2186,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
},
{
category: "Operator keywords as keys",
@@ -2194,7 +2194,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
},
// Using function keywords as keys

View File

@@ -663,11 +663,11 @@ func (b *logQueryStatementBuilder) addFilterCondition(
FieldKeys: keys,
BodyJSONEnabled: bodyJSONEnabled,
SkipResourceFilter: true,
FullTextColumn: b.fullTextColumn,
JsonKeyToKey: b.jsonKeyToKey,
Variables: variables,
StartNs: start,
EndNs: end,
FTSFieldKeys: DefaultFTSFieldKeys,
})
if err != nil {

View File

@@ -2,7 +2,6 @@ package telemetrylogs
import (
"context"
"fmt"
"testing"
"time"
@@ -365,7 +364,7 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
expectedErr error
}{
{
name: "List with full text search exceeds 6h window",
name: "List with full text search",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
@@ -374,7 +373,11 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
},
Limit: 10,
},
expectedErr: fmt.Errorf("parsing the search expression"),
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"hello", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
{
name: "list query with mat col order by",
@@ -382,7 +385,7 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
Expression: "service.name = 'cartservice' hello",
},
Limit: 10,
Order: []qbtypes.OrderBy{
@@ -399,8 +402,8 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "hello", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
@@ -1034,7 +1037,7 @@ func TestStmtBuilderBodyField(t *testing.T) {
}
}
func TestStmtBuilderFTS(t *testing.T) {
func TestStmtBuilderBodyFullTextSearch(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -1042,9 +1045,6 @@ func TestStmtBuilderFTS(t *testing.T) {
enableUseJSONBody bool
expected qbtypes.Statement
expectedErr error
// optional per-case time window (ms); zero → use default 1747947419000/1747983448000
startMs uint64
endMs uint64
}{
{
name: "fts",
@@ -1055,11 +1055,10 @@ func TestStmtBuilderFTS(t *testing.T) {
Limit: 10,
},
enableUseJSONBody: true,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body_v2.message), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{querybuilder.BodyFullTextSearchDefaultWarning},
},
expectedErr: nil,
},
@@ -1072,16 +1071,15 @@ func TestStmtBuilderFTS(t *testing.T) {
Limit: 10,
},
enableUseJSONBody: true,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body_v2.message), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{querybuilder.BodyFullTextSearchDefaultWarning},
},
expectedErr: nil,
},
{
name: "fts_json_disabled",
name: "fts_disabled",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
@@ -1089,91 +1087,12 @@ func TestStmtBuilderFTS(t *testing.T) {
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
// search() function: uses a 2-hour window to stay under the 6-hour limit
{
name: "search_fans_out_to_all_columns",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search('error')"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
name: "search_unquoted_token",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search(error)"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
name: "search_not_wraps_condition",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "NOT search('healthcheck')"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE NOT ((match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string))))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
name: "search_combined_with_filter",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search('error') AND severity_text = 'ERROR'"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE ((match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND severity_text = ?) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "ERROR", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
// default window is ~10h which exceeds the 6-hour search() limit
name: "search_window_exceeds_6h",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search('error')"},
Limit: 10,
},
enableUseJSONBody: false,
expectedErr: fmt.Errorf("6-hour"),
},
}
for _, c := range cases {
@@ -1199,24 +1118,19 @@ func TestStmtBuilderFTS(t *testing.T) {
fl,
)
startMs := uint64(1747947419000)
if c.startMs != 0 {
startMs = c.startMs
}
endMs := uint64(1747983448000)
if c.endMs != 0 {
endMs = c.endMs
}
q, err := statementBuilder.Build(context.Background(), startMs, endMs, c.requestType, c.query, nil)
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
require.NoError(t, err)
if c.expected.Query != "" {
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
if err != nil {
_, _, _, _, _, add := errors.Unwrapb(err)
t.Logf("error additionals: %v", add)
}
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
}

View File

@@ -413,28 +413,6 @@ func (b *traceOperatorCTEBuilder) buildFinalQuery(ctx context.Context, selectFro
}
func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
sb := sqlbuilder.NewSelectBuilder()
// Select core fields
sb.Select(
"timestamp",
"trace_id",
"span_id",
"name",
"duration_nano",
"parent_span_id",
)
selectedFields := map[string]bool{
"timestamp": true,
"trace_id": true,
"span_id": true,
"name": true,
"duration_nano": true,
"parent_span_id": true,
}
// Get keys for selectFields
keySelectors := b.getKeySelectors()
for _, field := range b.operator.SelectFields {
keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
@@ -444,13 +422,38 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
FieldDataType: field.FieldDataType,
})
}
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
// Add selectFields using ColumnExpressionFor since we now have all base table columns
coreFields := []string{"timestamp", "trace_id", "span_id", "name", "duration_nano", "parent_span_id"}
selectedFields := map[string]bool{
"timestamp": true,
"trace_id": true,
"span_id": true,
"name": true,
"duration_nano": true,
"parent_span_id": true,
}
// Workaround for a CH 25.12.5 distributed analyzer regression (ClickHouse/ClickHouse#103508):
// native columns selected from a CTE backed by a Distributed table get renamed col → col_0
// in the shard-local block, which makes them invisible to the outer SELECT/ORDER BY.
// Fix: alias every core field to a safe name (_s_<col>) in the inner SELECT so the
// analyzer never resolves the original column name as an ORDER BY target; the
// outer SELECT then re-exposes the fields under their original names.
innerCoreExprs := make([]string, len(coreFields))
outerCoreExprs := make([]string, len(coreFields))
for i, f := range coreFields {
innerCoreExprs[i] = fmt.Sprintf("%s AS _s_%s", f, f)
outerCoreExprs[i] = fmt.Sprintf("_s_%s AS %s", f, f)
}
innerSB := sqlbuilder.NewSelectBuilder()
innerSB.Select(innerCoreExprs...)
var additionalSelectedFields []string
for _, field := range b.operator.SelectFields {
if selectedFields[field.Name] {
continue
@@ -461,41 +464,56 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
slog.String("field", field.Name), errors.Attr(err))
continue
}
sb.SelectMore(colExpr)
innerSB.SelectMore(colExpr)
selectedFields[field.Name] = true
additionalSelectedFields = append(additionalSelectedFields, field.Name)
}
sb.From(selectFromCTE)
// Add order by support using ColumnExpressionFor
orderApplied := false
for _, orderBy := range b.operator.Order {
if selectedFields[orderBy.Key.Name] {
continue
}
colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, b.start, b.end, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil {
return nil, err
}
sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
orderApplied = true
innerSB.SelectMore(colExpr)
selectedFields[orderBy.Key.Name] = true
}
if !orderApplied {
sb.OrderBy("timestamp DESC")
innerSB.From(selectFromCTE)
innerSQL, innerArgs := innerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
// Outer SELECT: re-exposes core fields under their original names and acts as
// a projection barrier so ORDER BY-only fields do not leak into the result.
outerSB := sqlbuilder.NewSelectBuilder()
outerSB.Select(outerCoreExprs...)
for _, name := range additionalSelectedFields {
outerSB.SelectMore(fmt.Sprintf("`%s`", name))
}
outerSB.From(fmt.Sprintf("(%s) AS t", innerSQL))
if len(b.operator.Order) > 0 {
for _, orderBy := range b.operator.Order {
outerSB.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
}
} else {
outerSB.OrderBy("timestamp DESC")
}
if b.operator.Limit > 0 {
sb.Limit(b.operator.Limit)
outerSB.Limit(b.operator.Limit)
} else {
sb.Limit(100)
outerSB.Limit(100)
}
if b.operator.Offset > 0 {
sb.Offset(b.operator.Offset)
outerSB.Offset(b.operator.Offset)
}
sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
outerSQL, outerArgs := outerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{
Query: sql,
Args: args,
Query: outerSQL,
Args: append(innerArgs, outerArgs...),
}, nil
}

View File

@@ -67,7 +67,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name` FROM A_DIR_DESC_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id, `service.name` FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name` FROM A_DIR_DESC_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
@@ -104,7 +104,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_INDIR_DESC_B AS (WITH RECURSIVE up AS (SELECT d.trace_id, d.span_id, d.parent_span_id, 0 AS depth FROM B AS d UNION ALL SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1 FROM all_spans AS p JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id WHERE up.depth < 100) SELECT DISTINCT a.* FROM A AS a GLOBAL INNER JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0 ) AS ancestors ON ancestors.trace_id = a.trace_id AND ancestors.span_id = a.span_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_INDIR_DESC_B ORDER BY timestamp DESC LIMIT ? 
SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), A_INDIR_DESC_B AS (WITH RECURSIVE up AS (SELECT d.trace_id, d.span_id, d.parent_span_id, 0 AS depth FROM B AS d UNION ALL SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1 FROM all_spans AS p JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id WHERE up.depth < 100) SELECT DISTINCT a.* FROM A AS a GLOBAL INNER JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0 ) AS ancestors ON ancestors.trace_id = a.trace_id AND ancestors.span_id = a.span_id) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id FROM A_INDIR_DESC_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "gateway", "%service.name%", "%service.name\":\"gateway%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "database", "%service.name%", "%service.name\":\"database%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 5},
},
expectedErr: nil,
@@ -141,7 +141,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_AND_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id FROM A_AND_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 15},
},
expectedErr: nil,
@@ -178,7 +178,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_OR_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id FROM A_OR_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 20},
},
expectedErr: nil,
@@ -215,7 +215,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B)) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_not_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B)) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id FROM A_not_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
@@ -380,11 +380,72 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), __resource_filter_C AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), C AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_C) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), __resource_filter_D AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), D AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_D) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), C_DIR_DESC_D AS (SELECT p.* FROM C AS p INNER JOIN D AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), A_DIR_DESC_B_AND_C_DIR_DESC_D AS (SELECT l.* FROM A_DIR_DESC_B AS l INNER JOIN C_DIR_DESC_D AS r ON l.trace_id = r.trace_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_DIR_DESC_B_AND_C_DIR_DESC_D ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), __resource_filter_C AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), C AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_C) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), __resource_filter_D AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), D AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_D) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), C_DIR_DESC_D AS (SELECT p.* FROM C AS p INNER JOIN D AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), A_DIR_DESC_B_AND_C_DIR_DESC_D AS (SELECT l.* FROM A_DIR_DESC_B AS l INNER JOIN C_DIR_DESC_D AS r ON l.trace_id = r.trace_id) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id FROM A_DIR_DESC_B_AND_C_DIR_DESC_D) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "auth", "%service.name%", "%service.name\":\"auth%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "database", "%service.name%", "%service.name\":\"database%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 5},
},
expectedErr: nil,
},
{
// order-by field (http.request.method) is not present in SelectFields;
// it must be included in the inner SELECT so the outer ORDER BY can
// reference it by alias, but must NOT appear in the outer SELECT list.
name: "order by field not in select fields",
requestType: qbtypes.RequestTypeRaw,
operator: qbtypes.QueryBuilderTraceOperator{
Expression: "A => B",
SelectFields: []telemetrytypes.TelemetryFieldKey{
{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Order: []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "http.request.method",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
Limit: 10,
},
compositeQuery: &qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Name: "A",
Signal: telemetrytypes.SignalTraces,
Filter: &qbtypes.Filter{
Expression: "service.name = 'frontend'",
},
},
},
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Name: "B",
Signal: telemetrytypes.SignalTraces,
Filter: &qbtypes.Filter{
Expression: "service.name = 'backend'",
},
},
},
},
},
expected: qbtypes.Statement{
// http.request.method is in the inner SELECT (so ORDER BY can reach it)
// but is absent from the outer SELECT column list — only the ORDER BY clause references it.
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? 
AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT _s_timestamp AS timestamp, _s_trace_id AS trace_id, _s_span_id AS span_id, _s_name AS name, _s_duration_nano AS duration_nano, _s_parent_span_id AS parent_span_id, `service.name` FROM (SELECT timestamp AS _s_timestamp, trace_id AS _s_trace_id, span_id AS _s_span_id, name AS _s_name, duration_nano AS _s_duration_nano, parent_span_id AS _s_parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, attributes_string['http.request.method'] AS `http.request.method` FROM A_DIR_DESC_B) AS t ORDER BY `http.request.method` desc LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
},
}
fm := NewFieldMapper()

View File

@@ -1,12 +0,0 @@
package retentiontypes
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
// Store provides read access to persisted TTL settings.
type Store interface {
	// ListTTLSettingsByTableNameAndBeforeCreatedAt returns successful TTL settings before the given timestamp.
	ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx context.Context, orgID valuer.UUID, tableName string, beforeMs int64) ([]*TTLSetting, error)
}

View File

@@ -1,141 +1,10 @@
package retentiontypes
import (
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
)
// secondsPerDay is used to convert second-based TTL values to whole days.
const secondsPerDay = 24 * 60 * 60

// Default retention windows, in days, per signal.
const (
	DefaultLogsRetentionDays    = 15
	DefaultMetricsRetentionDays = 30
	DefaultTracesRetentionDays  = 15
)

// Signal identifiers used for TTL settings.
const (
	TraceTTL   = "traces"
	MetricsTTL = "metrics"
	LogsTTL    = "logs"
)

// Lifecycle states of a TTL setting.
const (
	TTLSettingStatusPending = "pending"
	TTLSettingStatusFailed  = "failed"
	TTLSettingStatusSuccess = "success"
)
// RetentionPolicySegment is a half-open time range using one retention policy.
type RetentionPolicySegment struct {
	StartMs     int64                 // inclusive start, epoch milliseconds
	EndMs       int64                 // exclusive end, epoch milliseconds
	Rules       []CustomRetentionRule // custom rules active during this segment; nil when only the default applies
	DefaultDays int                   // default retention in days for data not matched by Rules
}
// NewRetentionPolicySegment creates a retention policy segment covering the
// half-open range [startMs, endMs) with the given rules and default days.
func NewRetentionPolicySegment(startMs int64, endMs int64, rules []CustomRetentionRule, defaultDays int) *RetentionPolicySegment {
	segment := new(RetentionPolicySegment)
	segment.StartMs = startMs
	segment.EndMs = endMs
	segment.Rules = rules
	segment.DefaultDays = defaultDays
	return segment
}
// BuildRetentionPolicySegmentsFromRows converts successful TTL settings into retention policy segments.
// The window [startMs, endMs) is split at every row created strictly inside it;
// the last row (in slice order) created at or before startMs supplies the policy
// active at the window start. Returns nil segments for an empty window.
// NOTE(review): correctness of "last row wins" presumes rows are sorted by
// CreatedAt ascending — confirm against the store query.
func BuildRetentionPolicySegmentsFromRows(rows []*TTLSetting, fallbackDefaultDays int, startMs, endMs int64) ([]*RetentionPolicySegment, error) {
	if startMs >= endMs {
		return nil, nil
	}
	// Partition rows into the one active at startMs and those inside the window.
	var activeAtStart *TTLSetting
	inWindow := make([]*TTLSetting, 0, len(rows))
	for _, row := range rows {
		rowMs := row.CreatedAt.UnixMilli()
		if rowMs <= startMs {
			activeAtStart = row // later qualifying rows overwrite earlier ones
			continue
		}
		if rowMs >= endMs {
			continue
		}
		inWindow = append(inWindow, row)
	}
	activeRules, activeDefault, err := parseTTLSetting(activeAtStart, fallbackDefaultDays)
	if err != nil {
		return nil, err
	}
	segments := make([]*RetentionPolicySegment, 0, len(inWindow)+1)
	cursor := startMs
	for _, row := range inWindow {
		rowMs := row.CreatedAt.UnixMilli()
		if rowMs <= cursor {
			// Row does not advance the cursor (e.g. same timestamp as the
			// previous boundary): it replaces the active policy in place.
			activeRules, activeDefault, err = parseTTLSetting(row, fallbackDefaultDays)
			if err != nil {
				return nil, err
			}
			continue
		}
		// Close the segment governed by the previous policy, then switch to this row's policy.
		segments = append(segments, NewRetentionPolicySegment(cursor, rowMs, activeRules, activeDefault))
		cursor = rowMs
		activeRules, activeDefault, err = parseTTLSetting(row, fallbackDefaultDays)
		if err != nil {
			return nil, err
		}
	}
	// Tail segment from the last policy change to the end of the window.
	if cursor < endMs {
		segments = append(segments, NewRetentionPolicySegment(cursor, endMs, activeRules, activeDefault))
	}
	return segments, nil
}
// parseTTLSetting extracts the custom rules and default retention days from a
// ttl_setting row. A nil row yields the fallback default and no rules.
func parseTTLSetting(row *TTLSetting, fallbackDefaultDays int) ([]CustomRetentionRule, int, error) {
	if row == nil {
		return nil, fallbackDefaultDays, nil
	}
	if row.Condition == "" {
		// Rows without a condition carry TTL in seconds; convert to days,
		// rounding up, and fall back when the result is non-positive.
		days := (row.TTL + secondsPerDay - 1) / secondsPerDay
		if days <= 0 {
			days = fallbackDefaultDays
		}
		return nil, days, nil
	}
	// Rows with a condition carry TTL directly in days.
	days := row.TTL
	if days <= 0 {
		days = fallbackDefaultDays
	}
	var rules []CustomRetentionRule
	if err := json.Unmarshal([]byte(row.Condition), &rules); err != nil {
		return nil, 0, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "parse ttl_setting condition for row %q", row.ID.StringValue())
	}
	return rules, days, nil
}
// CustomRetentionRule is one custom retention rule as stored in ttl_setting.condition.
// Rules are evaluated in declaration order; the first matching rule wins.
type CustomRetentionRule struct {
	Filters []FilterCondition `json:"conditions"` // all conditions must match for the rule to apply
	TTLDays int               `json:"ttlDays"`    // retention, in days, for matching data
}

// FilterCondition is one label-key, allowed-values condition inside a retention rule.
type FilterCondition struct {
	Key    string   `json:"key"`    // label key to match on
	Values []string `json:"values"` // accepted values for the key
}
type TTLSetting struct {
bun.BaseModel `bun:"table:ttl_setting"`
types.Identifiable
@@ -148,73 +17,3 @@ type TTLSetting struct {
OrgID string `json:"-" bun:"org_id,notnull"`
Condition string `bun:"condition,type:text"`
}
// TTLParams carries the inputs of a plain (non-custom) TTL update.
type TTLParams struct {
	Type                  string // signal: TraceTTL, MetricsTTL or LogsTTL
	ColdStorageVolume     string // target volume for cold storage, empty to disable
	ToColdStorageDuration int64  // age at which data moves to cold storage
	DelDuration           int64  // age at which data is deleted
}

// CustomRetentionTTLParams carries the inputs of a custom-retention TTL update.
type CustomRetentionTTLParams struct {
	Type                      string                `json:"type"`
	DefaultTTLDays            int                   `json:"defaultTTLDays"`
	TTLConditions             []CustomRetentionRule `json:"ttlConditions"`
	ColdStorageVolume         string                `json:"coldStorageVolume,omitempty"`
	ToColdStorageDurationDays int64                 `json:"coldStorageDurationDays,omitempty"`
}

// GetTTLParams selects which signal's TTL configuration to read.
type GetTTLParams struct {
	Type string
}

// GetCustomRetentionTTLResponse is the read response for a custom-retention TTL configuration.
type GetCustomRetentionTTLResponse struct {
	Version              string                `json:"version"`
	Status               string                `json:"status"`
	ExpectedLogsTime     int                   `json:"expected_logs_ttl_duration_hrs,omitempty"`
	ExpectedLogsMoveTime int                   `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
	DefaultTTLDays       int                   `json:"default_ttl_days,omitempty"`
	TTLConditions        []CustomRetentionRule `json:"ttl_conditions,omitempty"`
	ColdStorageVolume    string                `json:"cold_storage_volume,omitempty"`
	ColdStorageTTLDays   int                   `json:"cold_storage_ttl_days,omitempty"`
}

// CustomRetentionTTLResponse is the write acknowledgement for a custom-retention TTL update.
type CustomRetentionTTLResponse struct {
	Message string `json:"message"`
}

// TTLStatusItem is one row of TTL update status, as stored/read via db tags.
type TTLStatusItem struct {
	Id             int       `json:"id" db:"id"`
	UpdatedAt      time.Time `json:"updated_at" db:"updated_at"`
	CreatedAt      time.Time `json:"created_at" db:"created_at"`
	TableName      string    `json:"table_name" db:"table_name"`
	TTL            int       `json:"ttl" db:"ttl"`
	Status         string    `json:"status" db:"status"`
	ColdStorageTtl int       `json:"cold_storage_ttl" db:"cold_storage_ttl"`
}

// SetTTLResponseItem is the write acknowledgement for a plain TTL update.
type SetTTLResponseItem struct {
	Message string `json:"message"`
}

// DBResponseTTL holds the table engine definition read back from ClickHouse.
type DBResponseTTL struct {
	EngineFull string `ch:"engine_full"`
}

// GetTTLResponseItem is the read response for plain TTL configuration across all signals.
type GetTTLResponseItem struct {
	MetricsTime             int    `json:"metrics_ttl_duration_hrs,omitempty"`
	MetricsMoveTime         int    `json:"metrics_move_ttl_duration_hrs,omitempty"`
	TracesTime              int    `json:"traces_ttl_duration_hrs,omitempty"`
	TracesMoveTime          int    `json:"traces_move_ttl_duration_hrs,omitempty"`
	LogsTime                int    `json:"logs_ttl_duration_hrs,omitempty"`
	LogsMoveTime            int    `json:"logs_move_ttl_duration_hrs,omitempty"`
	ExpectedMetricsTime     int    `json:"expected_metrics_ttl_duration_hrs,omitempty"`
	ExpectedMetricsMoveTime int    `json:"expected_metrics_move_ttl_duration_hrs,omitempty"`
	ExpectedTracesTime      int    `json:"expected_traces_ttl_duration_hrs,omitempty"`
	ExpectedTracesMoveTime  int    `json:"expected_traces_move_ttl_duration_hrs,omitempty"`
	ExpectedLogsTime        int    `json:"expected_logs_ttl_duration_hrs,omitempty"`
	ExpectedLogsMoveTime    int    `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
	Status                  string `json:"status"`
}

View File

@@ -1,83 +0,0 @@
package retentiontypes
import (
"encoding/json"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/types"
"github.com/stretchr/testify/require"
)
// TestBuildRetentionPolicySegmentsFromRows covers the three segmentation cases:
// a row created before the window, rows created inside the window, and no rows.
func TestBuildRetentionPolicySegmentsFromRows(t *testing.T) {
	start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
	end := start.AddDate(0, 0, 1)
	ruleA := CustomRetentionRule{
		Filters: []FilterCondition{{Key: "service.name", Values: []string{"api"}}},
		TTLDays: 7,
	}
	ruleB := CustomRetentionRule{
		Filters: []FilterCondition{{Key: "env", Values: []string{"prod"}}},
		TTLDays: 15,
	}
	t.Run("row before window is active at start", func(t *testing.T) {
		segments, err := BuildRetentionPolicySegmentsFromRows(
			[]*TTLSetting{
				ttlSetting(t, start.Add(-time.Hour), 45, []CustomRetentionRule{ruleA}),
			},
			30,
			start.UnixMilli(),
			end.UnixMilli(),
		)
		require.NoError(t, err)
		// The pre-window row governs the whole window: one segment with default 45 days.
		require.Equal(t, []*RetentionPolicySegment{
			NewRetentionPolicySegment(start.UnixMilli(), end.UnixMilli(), []CustomRetentionRule{ruleA}, 45),
		}, segments)
	})
	t.Run("row inside window splits segments", func(t *testing.T) {
		firstChange := start.Add(6 * time.Hour)
		secondChange := start.Add(18 * time.Hour)
		segments, err := BuildRetentionPolicySegmentsFromRows(
			[]*TTLSetting{
				ttlSetting(t, firstChange, 21, []CustomRetentionRule{ruleA}),
				ttlSetting(t, secondChange, 14, []CustomRetentionRule{ruleB}),
			},
			30,
			start.UnixMilli(),
			end.UnixMilli(),
		)
		require.NoError(t, err)
		// The fallback (30 days) applies until the first change; each in-window
		// row then starts a new segment carrying its own rules and default.
		require.Equal(t, []*RetentionPolicySegment{
			NewRetentionPolicySegment(start.UnixMilli(), firstChange.UnixMilli(), nil, 30),
			NewRetentionPolicySegment(firstChange.UnixMilli(), secondChange.UnixMilli(), []CustomRetentionRule{ruleA}, 21),
			NewRetentionPolicySegment(secondChange.UnixMilli(), end.UnixMilli(), []CustomRetentionRule{ruleB}, 14),
		}, segments)
	})
	t.Run("no rows uses fallback", func(t *testing.T) {
		segments, err := BuildRetentionPolicySegmentsFromRows(nil, 30, start.UnixMilli(), end.UnixMilli())
		require.NoError(t, err)
		require.Equal(t, []*RetentionPolicySegment{
			NewRetentionPolicySegment(start.UnixMilli(), end.UnixMilli(), nil, 30),
		}, segments)
	})
}
// ttlSetting builds a TTLSetting fixture whose TTL is expressed in days and
// whose Condition is the JSON encoding of rules (so parseTTLSetting treats
// TTL as days rather than seconds).
func ttlSetting(t *testing.T, createdAt time.Time, ttlDays int, rules []CustomRetentionRule) *TTLSetting {
	t.Helper()
	condition, err := json.Marshal(rules)
	require.NoError(t, err)
	return &TTLSetting{
		TimeAuditable: types.TimeAuditable{
			CreatedAt: createdAt,
		},
		TTL:       ttlDays,
		Condition: string(condition),
	}
}

View File

@@ -1,20 +0,0 @@
package zeustypes
import "go.opentelemetry.io/otel/attribute"
// Attribute keys used to build meter dimensions.
var (
	// OrganizationID identifies the organization.
	OrganizationID = attribute.Key("signoz.organization.id")
	// RetentionDuration identifies the retention bucket a meter belongs to.
	RetentionDuration = attribute.Key("signoz.retention.duration")
)
// NewDimensions flattens OpenTelemetry attributes into a plain string map,
// keyed by attribute key with the stringified value. A later duplicate key
// overwrites an earlier one.
func NewDimensions(kvs ...attribute.KeyValue) map[string]string {
	// Pre-size the map to avoid rehashing as entries are added.
	dimensions := make(map[string]string, len(kvs))
	for _, kv := range kvs {
		dimensions[string(kv.Key)] = kv.Value.AsString()
	}
	return dimensions
}

View File

@@ -1,29 +0,0 @@
package zeustypes
import (
"encoding/json"
)
// GettableDeployment is the subset of the deployment payload this package reads.
type GettableDeployment struct {
	ID      string `json:"id"`
	Name    string `json:"name"`
	Cluster struct {
		ID     string `json:"id"`
		Name   string `json:"name"`
		Region struct {
			ID   string `json:"id"`
			Name string `json:"name"`
			DNS  string `json:"dns"`
		} `json:"region"`
	} `json:"cluster"`
}

// NewGettableDeployment decodes raw deployment JSON into a GettableDeployment.
func NewGettableDeployment(data []byte) (*GettableDeployment, error) {
	var deployment GettableDeployment
	if err := json.Unmarshal(data, &deployment); err != nil {
		return nil, err
	}
	return &deployment, nil
}

View File

@@ -1,46 +0,0 @@
package zeustypes
import (
"net/url"
"github.com/tidwall/gjson"
)
// Host is one addressable endpoint of a deployment.
type Host struct {
	Name      string `json:"name" required:"true"`
	IsDefault bool   `json:"is_default" required:"true"`
	URL       string `json:"url" required:"true"`
}

// GettableHost is the host listing returned for a deployment.
type GettableHost struct {
	Name  string `json:"name" required:"true"`
	State string `json:"state" required:"true"`
	Tier  string `json:"tier" required:"true"`
	Hosts []Host `json:"hosts" required:"true"`
}

// PostableHost is the payload used to create a host.
type PostableHost struct {
	Name string `json:"name" required:"true"`
}
// NewGettableHost parses raw deployment JSON into a GettableHost, deriving
// each host's HTTPS URL from its name and the cluster region's DNS suffix.
func NewGettableHost(data []byte) *GettableHost {
	parsed := gjson.ParseBytes(data)
	regionDNS := parsed.Get("cluster.region.dns").String()
	results := parsed.Get("hosts").Array()
	hosts := make([]Host, len(results))
	for i := range results {
		hostName := results[i].Get("name").String()
		hosts[i] = Host{
			Name:      hostName,
			IsDefault: results[i].Get("is_default").Bool(),
			URL:       (&url.URL{Scheme: "https", Host: hostName + "." + regionDNS}).String(),
		}
	}
	return &GettableHost{
		Name:  parsed.Get("name").String(),
		State: parsed.Get("state").String(),
		Tier:  parsed.Get("tier").String(),
		Hosts: hosts,
	}
}

View File

@@ -1,54 +0,0 @@
package zeustypes
import "time"
// MeterCheckpoint records, per meter name, the start of the latest day Zeus
// has stored for that meter.
type MeterCheckpoint struct {
	Name      string    // fully-qualified meter name
	StartDate time.Time // window start of the stored reading
}

// Meter is one aggregated meter reading over a reporting window.
type Meter struct {
	// MeterName is the fully-qualified meter identifier.
	MeterName MeterName `json:"name"`
	// Value is the aggregated integer scalar for this meter over the reporting window.
	Value int64 `json:"value"`
	// Unit is the metric unit for this meter.
	Unit MeterUnit `json:"unit"`
	// Aggregation names the aggregation applied to produce Value.
	Aggregation MeterAggregation `json:"aggregation"`
	// StartUnixMilli is the inclusive window start in epoch milliseconds.
	StartUnixMilli int64 `json:"start_unix_milli"`
	// EndUnixMilli is the exclusive window end in epoch milliseconds.
	EndUnixMilli int64 `json:"end_unix_milli"`
	// IsCompleted is false for the current day's partial value.
	IsCompleted bool `json:"is_completed"`
	// Dimensions is the per-meter label set.
	Dimensions map[string]string `json:"dimensions"`
}
// NewMeter assembles a Meter reading from its parts, copying the window bounds
// and completion flag out of the MeterWindow.
func NewMeter(
	name MeterName,
	value int64,
	unit MeterUnit,
	aggregation MeterAggregation,
	window MeterWindow,
	dimensions map[string]string,
) Meter {
	meter := Meter{
		MeterName:   name,
		Value:       value,
		Unit:        unit,
		Aggregation: aggregation,
		Dimensions:  dimensions,
	}
	meter.StartUnixMilli = window.StartUnixMilli
	meter.EndUnixMilli = window.EndUnixMilli
	meter.IsCompleted = window.IsCompleted
	return meter
}

View File

@@ -1,12 +0,0 @@
package zeustypes
import "github.com/SigNoz/signoz/pkg/valuer"
// MeterAggregation names the aggregation applied to produce a meter value.
type MeterAggregation struct {
	valuer.String
}

// Supported aggregations.
var (
	MeterAggregationSum = MeterAggregation{valuer.NewString("sum")}
	MeterAggregationMax = MeterAggregation{valuer.NewString("max")}
)

View File

@@ -1,66 +0,0 @@
package zeustypes
import (
"encoding/json"
"regexp"
"github.com/SigNoz/signoz/pkg/errors"
)
// Billing meter names shipped to Zeus.
var (
	MeterSpanSize       = MustNewMeterName("signoz.meter.span.size")
	MeterLogSize        = MustNewMeterName("signoz.meter.log.size")
	MeterDatapointCount = MustNewMeterName("signoz.meter.metric.datapoint.count")
	MeterPlatformActive = MustNewMeterName("signoz.meter.platform.active")
)

// meterNameRegex accepts lowercase dotted identifiers, e.g. "signoz.meter.span.size".
var meterNameRegex = regexp.MustCompile(`^[a-z][a-z0-9_.]+$`)
// MeterName is a validated dotted Zeus meter name.
type MeterName struct {
	s string
}

// NewMeterName validates s against meterNameRegex and wraps it in a MeterName.
func NewMeterName(s string) (MeterName, error) {
	if !meterNameRegex.MatchString(s) {
		return MeterName{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid meter name: %s", s)
	}
	return MeterName{s: s}, nil
}

// MustNewMeterName is NewMeterName but panics on an invalid name; intended for
// package-level declarations.
func MustNewMeterName(s string) MeterName {
	name, err := NewMeterName(s)
	if err != nil {
		panic(err)
	}
	return name
}

// String returns the raw meter name.
func (n MeterName) String() string {
	return n.s
}

// IsZero reports whether n is the zero (unset) meter name.
func (n MeterName) IsZero() bool {
	return n.s == ""
}

// MarshalJSON encodes the meter name as a JSON string.
func (n MeterName) MarshalJSON() ([]byte, error) {
	return json.Marshal(n.s)
}

// UnmarshalJSON decodes a JSON string and re-validates it via NewMeterName.
func (n *MeterName) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	name, err := NewMeterName(s)
	if err != nil {
		return err
	}
	*n = name
	return nil
}

View File

@@ -1,30 +0,0 @@
package zeustypes
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestNewMeterWindow checks that a valid window round-trips its bounds and
// that non-positive starts and non-increasing ends are rejected.
func TestNewMeterWindow(t *testing.T) {
	start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
	window, err := NewMeterWindow(start.UnixMilli(), start.AddDate(0, 0, 1).UnixMilli(), true)
	require.NoError(t, err)
	require.Equal(t, start.UnixMilli(), window.StartUnixMilli)
	require.Equal(t, start.AddDate(0, 0, 1).UnixMilli(), window.EndUnixMilli)
	require.True(t, window.IsCompleted)
	// Start must be positive.
	_, err = NewMeterWindow(0, start.UnixMilli(), true)
	require.Error(t, err)
	// End must be strictly after start.
	_, err = NewMeterWindow(start.UnixMilli(), start.UnixMilli(), false)
	require.Error(t, err)
}

// TestMustNewMeterWindowPanicsForInvalidWindow ensures the Must variant panics
// instead of returning an error.
func TestMustNewMeterWindowPanicsForInvalidWindow(t *testing.T) {
	require.Panics(t, func() {
		MustNewMeterWindow(0, 0, true)
	})
}

View File

@@ -1,12 +0,0 @@
package zeustypes
import "github.com/SigNoz/signoz/pkg/valuer"
// MeterUnit is the metric unit attached to a meter reading.
type MeterUnit struct {
	valuer.String
}

// Supported units.
var (
	MeterUnitCount = MeterUnit{valuer.NewString("count")}
	MeterUnitBytes = MeterUnit{valuer.NewString("bytes")}
)

View File

@@ -1,43 +0,0 @@
package zeustypes
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
)
// MeterWindow is a validated half-open reporting window [StartUnixMilli, EndUnixMilli).
type MeterWindow struct {
	StartUnixMilli int64 // inclusive start, epoch milliseconds; always > 0
	EndUnixMilli   int64 // exclusive end, epoch milliseconds; always > StartUnixMilli
	IsCompleted    bool  // false while the window is still open
}

// NewMeterWindow validates the bounds and returns the window.
func NewMeterWindow(startUnixMilli, endUnixMilli int64, isCompleted bool) (MeterWindow, error) {
	if startUnixMilli <= 0 {
		return MeterWindow{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "meter window start must be positive: %d", startUnixMilli)
	}
	if endUnixMilli <= startUnixMilli {
		return MeterWindow{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "meter window end must be after start: [%d, %d)", startUnixMilli, endUnixMilli)
	}
	return MeterWindow{
		StartUnixMilli: startUnixMilli,
		EndUnixMilli:   endUnixMilli,
		IsCompleted:    isCompleted,
	}, nil
}

// MustNewMeterWindow is NewMeterWindow but panics on invalid bounds.
func MustNewMeterWindow(startUnixMilli, endUnixMilli int64, isCompleted bool) MeterWindow {
	window, err := NewMeterWindow(startUnixMilli, endUnixMilli, isCompleted)
	if err != nil {
		panic(err)
	}
	return window
}
// Day returns UTC midnight of the calendar day containing the window start.
func (w MeterWindow) Day() time.Time {
	year, month, day := time.UnixMilli(w.StartUnixMilli).UTC().Date()
	return time.Date(year, month, day, 0, 0, 0, 0, time.UTC)
}

View File

@@ -1,13 +0,0 @@
package zeustypes
// PostableProfile is the onboarding questionnaire payload sent to Zeus.
type PostableProfile struct {
	UsesOtel                     bool     `json:"uses_otel" required:"true"`
	HasExistingObservabilityTool bool     `json:"has_existing_observability_tool" required:"true"`
	ExistingObservabilityTool    string   `json:"existing_observability_tool" required:"true"`
	ReasonsForInterestInSigNoz   []string `json:"reasons_for_interest_in_signoz" required:"true"`
	LogsScalePerDayInGB          int64    `json:"logs_scale_per_day_in_gb" required:"true"`
	NumberOfServices             int64    `json:"number_of_services" required:"true"`
	NumberOfHosts                int64    `json:"number_of_hosts" required:"true"`
	WhereDidYouDiscoverSigNoz    string   `json:"where_did_you_discover_signoz" required:"true"`
	TimelineForMigratingToSigNoz string   `json:"timeline_for_migrating_to_signoz" required:"true"`
}

View File

@@ -0,0 +1,87 @@
package zeustypes
import (
"encoding/json"
"net/url"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/tidwall/gjson"
)
// PostableHost is the payload used to create a host.
type PostableHost struct {
	Name string `json:"name" required:"true"`
}

// PostableProfile is the onboarding questionnaire payload sent to Zeus.
type PostableProfile struct {
	UsesOtel                     bool     `json:"uses_otel" required:"true"`
	HasExistingObservabilityTool bool     `json:"has_existing_observability_tool" required:"true"`
	ExistingObservabilityTool    string   `json:"existing_observability_tool" required:"true"`
	ReasonsForInterestInSigNoz   []string `json:"reasons_for_interest_in_signoz" required:"true"`
	LogsScalePerDayInGB          int64    `json:"logs_scale_per_day_in_gb" required:"true"`
	NumberOfServices             int64    `json:"number_of_services" required:"true"`
	NumberOfHosts                int64    `json:"number_of_hosts" required:"true"`
	WhereDidYouDiscoverSigNoz    string   `json:"where_did_you_discover_signoz" required:"true"`
	TimelineForMigratingToSigNoz string   `json:"timeline_for_migrating_to_signoz" required:"true"`
}

// GettableHost is the host listing returned for a deployment.
type GettableHost struct {
	Name  string `json:"name" required:"true"`
	State string `json:"state" required:"true"`
	Tier  string `json:"tier" required:"true"`
	Hosts []Host `json:"hosts" required:"true"`
}

// Host is one addressable endpoint of a deployment.
type Host struct {
	Name      string `json:"name" required:"true"`
	IsDefault bool   `json:"is_default" required:"true"`
	URL       string `json:"url" required:"true"`
}
// NewGettableHost parses raw deployment JSON into a GettableHost, deriving
// each host's HTTPS URL from its name plus the cluster region's DNS suffix.
func NewGettableHost(data []byte) *GettableHost {
	parsed := gjson.ParseBytes(data)
	regionDNS := parsed.Get("cluster.region.dns").String()
	results := parsed.Get("hosts").Array()
	hosts := make([]Host, len(results))
	for i := range results {
		hostName := results[i].Get("name").String()
		hosts[i] = Host{
			Name:      hostName,
			IsDefault: results[i].Get("is_default").Bool(),
			URL:       (&url.URL{Scheme: "https", Host: hostName + "." + regionDNS}).String(),
		}
	}
	return &GettableHost{
		Name:  parsed.Get("name").String(),
		State: parsed.Get("state").String(),
		Tier:  parsed.Get("tier").String(),
		Hosts: hosts,
	}
}
// GettableDeployment represents the parsed deployment info from zeus.GetDeployment.
// NOTE: this is not a full response structure, add more fields from actual response as per requirement.
type GettableDeployment struct {
	ID      string `json:"id"`
	Name    string `json:"name"`
	Cluster struct {
		ID     string `json:"id"`
		Name   string `json:"name"`
		Region struct {
			ID   string `json:"id"`
			Name string `json:"name"`
			DNS  string `json:"dns"`
		} `json:"region"`
	} `json:"cluster"`
}

// NewGettableDeployment parses raw GetDeployment bytes into a GettableDeployment.
// Malformed JSON is surfaced as a wrapped internal error.
func NewGettableDeployment(data []byte) (*GettableDeployment, error) {
	deployment := new(GettableDeployment)
	err := json.Unmarshal(data, deployment)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to unmarshal deployment response")
	}
	return deployment, nil
}

View File

@@ -49,14 +49,6 @@ func (provider *provider) PutMetersV2(_ context.Context, _ string, _ []byte) err
return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting meters v2 is not supported")
}
// PutMetersV3 always returns an unsupported error; this provider does not ship meters.
func (provider *provider) PutMetersV3(_ context.Context, _ string, _ string, _ []byte) error {
	return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting meters v3 is not supported")
}

// ListMeterCheckpoints always returns an unsupported error.
func (provider *provider) ListMeterCheckpoints(_ context.Context, _ string) ([]zeustypes.MeterCheckpoint, error) {
	return nil, errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "list meter checkpoints is not supported")
}

// PutProfile always returns an unsupported error.
func (provider *provider) PutProfile(_ context.Context, _ string, _ *zeustypes.PostableProfile) error {
	return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting profile is not supported")
}

View File

@@ -35,16 +35,6 @@ type Zeus interface {
// Puts the meters for the given license key using Zeus.
PutMetersV2(context.Context, string, []byte) error
// PutMetersV3 ships one day's raw JSON array of meter readings to the
// v2/meters endpoint. idempotencyKey is propagated as X-Idempotency-Key so
// Zeus can UPSERT on retries.
PutMetersV3(ctx context.Context, licenseKey string, idempotencyKey string, body []byte) error
// ListMeterCheckpoints returns the latest sealed (is_completed=true) UTC day
// Zeus has stored for each billing meter name. Missing meter names are
// treated by the cron as bootstrap cases.
ListMeterCheckpoints(ctx context.Context, licenseKey string) ([]zeustypes.MeterCheckpoint, error)
// Put profile for the given license key.
PutProfile(context.Context, string, *zeustypes.PostableProfile) error

View File

@@ -72,6 +72,7 @@ class TraceOperatorQuery:
return_spans_from: str
limit: int | None = None
order: list[OrderBy] | None = None
select_fields: list[TelemetryFieldKey] | None = None
def to_dict(self) -> dict:
spec: dict[str, Any] = {
@@ -83,6 +84,8 @@ class TraceOperatorQuery:
spec["limit"] = self.limit
if self.order:
spec["order"] = [o.to_dict() if hasattr(o, "to_dict") else o for o in self.order]
if self.select_fields:
spec["selectFields"] = [f.to_dict() for f in self.select_fields]
return {"type": "builder_trace_operator", "spec": spec}

View File

@@ -0,0 +1,537 @@
"""
Integration tests for TraceOperatorQuery (builder_trace_operator) through the
/api/v5/query_range endpoint.
Covers:
1. Basic trace operator (A => B) — returns matched spans from the correct trace.
2. Order by a field absent from selectFields — must not return a server error.
Guards against the ClickHouse NOT_FOUND_COLUMN_IN_BLOCK regression where
ordering by a column absent from an outer SELECT caused a query failure.
"""
from collections.abc import Callable
from datetime import UTC, datetime, timedelta
from http import HTTPStatus
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.querier import (
OrderBy,
TelemetryFieldKey,
TraceOperatorQuery,
make_query_request,
)
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
def _builder_query(name: str, filter_expr: str, limit: int = 100) -> dict:
return {
"type": "builder_query",
"spec": {
"name": name,
"signal": "traces",
"filter": {"expression": filter_expr},
"limit": limit,
},
}
def test_trace_operator_query_basic(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[list[Traces]], None],
) -> None:
    """
    Setup:
        Insert one parent span and one child span in the same trace.
    Tests:
        A => B (parent has a direct child) returns the parent span (returnSpansFrom=A)
        from the correct trace.
    """
    parent_trace_id = TraceIdGenerator.trace_id()
    parent_span_id = TraceIdGenerator.span_id()
    child_span_id = TraceIdGenerator.span_id()
    now = datetime.now(tz=UTC).replace(second=0, microsecond=0)
    insert_traces(
        [
            # Parent span — matched by query A (operation.type = 'parent').
            Traces(
                timestamp=now - timedelta(seconds=10),
                duration=timedelta(seconds=5),
                trace_id=parent_trace_id,
                span_id=parent_span_id,
                parent_span_id="",
                name="parent-op",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-a"},
                attributes={"operation.type": "parent"},
            ),
            # Direct child of the parent span, in the same trace — matched by query B.
            Traces(
                timestamp=now - timedelta(seconds=9),
                duration=timedelta(seconds=2),
                trace_id=parent_trace_id,
                span_id=child_span_id,
                parent_span_id=parent_span_id,
                name="child-op",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-a"},
                attributes={"operation.type": "child"},
            ),
        ]
    )
    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    start_ms = int((now - timedelta(minutes=5)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    response = make_query_request(
        signoz,
        token,
        start_ms=start_ms,
        end_ms=end_ms,
        request_type="raw",
        queries=[
            _builder_query("A", "operation.type = 'parent'"),
            _builder_query("B", "operation.type = 'child'"),
            TraceOperatorQuery(
                name="C",
                expression="A => B",
                return_spans_from="A",
                limit=100,
            ).to_dict(),
        ],
    )
    assert response.status_code == HTTPStatus.OK, response.text
    assert response.json()["status"] == "success"
    results = response.json()["data"]["data"]["results"]
    assert len(results) == 1
    rows = results[0].get("rows") or []
    # Only the parent span is returned (returnSpansFrom=A).
    assert len(rows) == 1
    assert rows[0]["data"]["trace_id"] == parent_trace_id
    assert rows[0]["data"]["name"] == "parent-op"
def test_trace_operator_query_order_by_field_not_in_select_fields(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[list[Traces]], None],
) -> None:
    """
    Setup:
        Two traces, each with a grandparent → middle → grandchild chain:
        Trace 1: grandparent (svc-a, http.method=POST) → middle → grandchild
        Trace 2: grandparent (svc-b, http.method=GET) → middle → grandchild
    Tests:
        A -> B (indirect descendant) with selectFields=[service.name] and
        order=[http.method DESC], where http.method is NOT in selectFields.
        1. Query succeeds (no NOT_FOUND_COLUMN_IN_BLOCK error from ClickHouse).
        2. Results are actually ordered: POST sorts before GET descending, so
           svc-a must come before svc-b.
    """
    trace_id_1 = TraceIdGenerator.trace_id()
    trace_id_2 = TraceIdGenerator.trace_id()
    gp_span_id_1 = TraceIdGenerator.span_id()
    mid_span_id_1 = TraceIdGenerator.span_id()
    gc_span_id_1 = TraceIdGenerator.span_id()
    gp_span_id_2 = TraceIdGenerator.span_id()
    mid_span_id_2 = TraceIdGenerator.span_id()
    gc_span_id_2 = TraceIdGenerator.span_id()
    now = datetime.now(tz=UTC).replace(second=0, microsecond=0)
    insert_traces(
        [
            # Trace 1 — grandparent has http.method=POST (sorts first in DESC)
            Traces(
                timestamp=now - timedelta(seconds=10),
                duration=timedelta(seconds=5),
                trace_id=trace_id_1,
                span_id=gp_span_id_1,
                parent_span_id="",
                name="gp-op",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-a"},
                attributes={"operation.type": "grandparent", "http.method": "POST"},
            ),
            # Middle span links grandparent to grandchild so A -> B is indirect.
            Traces(
                timestamp=now - timedelta(seconds=9),
                duration=timedelta(seconds=3),
                trace_id=trace_id_1,
                span_id=mid_span_id_1,
                parent_span_id=gp_span_id_1,
                name="mid-op",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-a"},
                attributes={"operation.type": "middle"},
            ),
            Traces(
                timestamp=now - timedelta(seconds=8),
                duration=timedelta(seconds=1),
                trace_id=trace_id_1,
                span_id=gc_span_id_1,
                parent_span_id=mid_span_id_1,
                name="gc-op",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-a"},
                attributes={"operation.type": "grandchild"},
            ),
            # Trace 2 — grandparent has http.method=GET (sorts second in DESC)
            Traces(
                timestamp=now - timedelta(seconds=7),
                duration=timedelta(seconds=5),
                trace_id=trace_id_2,
                span_id=gp_span_id_2,
                parent_span_id="",
                name="gp-op",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-b"},
                attributes={"operation.type": "grandparent", "http.method": "GET"},
            ),
            Traces(
                timestamp=now - timedelta(seconds=6),
                duration=timedelta(seconds=3),
                trace_id=trace_id_2,
                span_id=mid_span_id_2,
                parent_span_id=gp_span_id_2,
                name="mid-op",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-b"},
                attributes={"operation.type": "middle"},
            ),
            Traces(
                timestamp=now - timedelta(seconds=5),
                duration=timedelta(seconds=1),
                trace_id=trace_id_2,
                span_id=gc_span_id_2,
                parent_span_id=mid_span_id_2,
                name="gc-op",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={"service.name": "svc-b"},
                attributes={"operation.type": "grandchild"},
            ),
        ]
    )
    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    start_ms = int((now - timedelta(minutes=5)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    response = make_query_request(
        signoz,
        token,
        start_ms=start_ms,
        end_ms=end_ms,
        request_type="raw",
        queries=[
            _builder_query("A", "operation.type = 'grandparent'"),
            _builder_query("B", "operation.type = 'grandchild'"),
            TraceOperatorQuery(
                name="C",
                expression="A -> B",  # indirect descendant
                return_spans_from="A",
                limit=100,
                select_fields=[
                    TelemetryFieldKey(name="service.name", field_data_type="string", field_context="resource"),
                ],
                order=[
                    # http.method is intentionally absent from select_fields
                    OrderBy(
                        key=TelemetryFieldKey(name="http.method", field_data_type="string", field_context="attribute"),
                        direction="desc",
                    ),
                ],
            ).to_dict(),
        ],
    )
    assert response.status_code == HTTPStatus.OK, response.text
    assert response.json()["status"] == "success"
    results = response.json()["data"]["data"]["results"]
    assert len(results) == 1
    rows = results[0].get("rows") or []
    # Both grandparent spans must be returned
    assert len(rows) == 2
    # Ordering: POST > GET in DESC — svc-a (POST) must come before svc-b (GET)
    assert rows[0]["data"]["service.name"] == "svc-a", f"Expected svc-a (POST) first in http.method DESC order, got {rows[0]['data']['service.name']}"
    assert rows[1]["data"]["service.name"] == "svc-b", f"Expected svc-b (GET) second in http.method DESC order, got {rows[1]['data']['service.name']}"
def test_trace_operator_query_order_by_select_field(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[list[Traces]], None],
) -> None:
    """
    Setup:
        Two traces, each a parent → child pair; the two parent spans carry
        distinct durations (5 s and 1 s).
    Tests:
        A => B ordered by duration_nano DESC with no explicit selectFields.
        1. The query succeeds (no NOT_FOUND_COLUMN_IN_BLOCK error).
        2. The longer-duration parent (5 s) is returned ahead of the shorter one (1 s).
    """
    now = datetime.now(tz=UTC).replace(second=0, microsecond=0)
    trace_id_long = TraceIdGenerator.trace_id()
    trace_id_short = TraceIdGenerator.trace_id()
    parent_long = TraceIdGenerator.span_id()
    child_long = TraceIdGenerator.span_id()
    parent_short = TraceIdGenerator.span_id()
    child_short = TraceIdGenerator.span_id()

    def _span(
        trace_id: str,
        span_id: str,
        parent_id: str,
        offset_s: int,
        duration_s: int,
        span_name: str,
        service: str,
        op_type: str,
        kind: TracesKind,
    ) -> Traces:
        # Every span shares the same OK status; only identity, timing and
        # labelling fields vary between the four inserted spans.
        return Traces(
            timestamp=now - timedelta(seconds=offset_s),
            duration=timedelta(seconds=duration_s),
            trace_id=trace_id,
            span_id=span_id,
            parent_span_id=parent_id,
            name=span_name,
            kind=kind,
            status_code=TracesStatusCode.STATUS_CODE_OK,
            status_message="",
            resources={"service.name": service},
            attributes={"operation.type": op_type},
        )

    insert_traces(
        [
            _span(trace_id_long, parent_long, "", 10, 5, "parent-long", "svc-long", "parent", TracesKind.SPAN_KIND_SERVER),
            _span(trace_id_long, child_long, parent_long, 9, 1, "child-long", "svc-long", "child", TracesKind.SPAN_KIND_INTERNAL),
            _span(trace_id_short, parent_short, "", 8, 1, "parent-short", "svc-short", "parent", TracesKind.SPAN_KIND_SERVER),
            _span(trace_id_short, child_short, parent_short, 7, 1, "child-short", "svc-short", "child", TracesKind.SPAN_KIND_INTERNAL),
        ]
    )
    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    response = make_query_request(
        signoz,
        token,
        start_ms=int((now - timedelta(minutes=5)).timestamp() * 1000),
        end_ms=int(now.timestamp() * 1000),
        request_type="raw",
        queries=[
            _builder_query("A", "operation.type = 'parent'"),
            _builder_query("B", "operation.type = 'child'"),
            TraceOperatorQuery(
                name="C",
                expression="A => B",
                return_spans_from="A",
                limit=100,
                order=[
                    OrderBy(
                        key=TelemetryFieldKey(name="duration_nano", field_context="span"),
                        direction="desc",
                    ),
                ],
            ).to_dict(),
        ],
    )
    assert response.status_code == HTTPStatus.OK, response.text
    body = response.json()
    assert body["status"] == "success"
    results = body["data"]["data"]["results"]
    assert len(results) == 1
    rows = results[0].get("rows") or []
    assert len(rows) == 2
    # DESC: 5 s parent first, 1 s parent second
    assert rows[0]["data"]["name"] == "parent-long", f"Expected parent-long (5s) first in duration_nano DESC, got {rows[0]['data']['name']}"
    assert rows[1]["data"]["name"] == "parent-short", f"Expected parent-short (1s) second in duration_nano DESC, got {rows[1]['data']['name']}"
def test_trace_operator_query_order_by_non_core_field_in_select(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[list[Traces]], None],
) -> None:
    """
    Setup:
        Two traces, each a parent → child pair; the parent spans carry
        distinct http.method attribute values (POST and GET).
    Tests:
        A => B with selectFields=[http.method] and order=[http.method DESC].
        1. The query succeeds (no NOT_FOUND_COLUMN_IN_BLOCK error).
        2. Every returned row contains http.method (it is in selectFields).
        3. Rows come back in DESC order — POST ahead of GET.
    """
    now = datetime.now(tz=UTC).replace(second=0, microsecond=0)
    trace_id_post = TraceIdGenerator.trace_id()
    trace_id_get = TraceIdGenerator.trace_id()
    parent_post = TraceIdGenerator.span_id()
    child_post = TraceIdGenerator.span_id()
    parent_get = TraceIdGenerator.span_id()
    child_get = TraceIdGenerator.span_id()

    def _span(
        trace_id: str,
        span_id: str,
        parent_id: str,
        offset_s: int,
        duration_s: int,
        span_name: str,
        service: str,
        kind: TracesKind,
        attributes: dict,
    ) -> Traces:
        # Shared OK status across all four spans; attributes differ because
        # only parent spans carry http.method.
        return Traces(
            timestamp=now - timedelta(seconds=offset_s),
            duration=timedelta(seconds=duration_s),
            trace_id=trace_id,
            span_id=span_id,
            parent_span_id=parent_id,
            name=span_name,
            kind=kind,
            status_code=TracesStatusCode.STATUS_CODE_OK,
            status_message="",
            resources={"service.name": service},
            attributes=attributes,
        )

    insert_traces(
        [
            _span(trace_id_post, parent_post, "", 10, 3, "parent-post", "svc-post", TracesKind.SPAN_KIND_SERVER, {"operation.type": "parent", "http.method": "POST"}),
            _span(trace_id_post, child_post, parent_post, 9, 1, "child-post", "svc-post", TracesKind.SPAN_KIND_INTERNAL, {"operation.type": "child"}),
            _span(trace_id_get, parent_get, "", 8, 3, "parent-get", "svc-get", TracesKind.SPAN_KIND_SERVER, {"operation.type": "parent", "http.method": "GET"}),
            _span(trace_id_get, child_get, parent_get, 7, 1, "child-get", "svc-get", TracesKind.SPAN_KIND_INTERNAL, {"operation.type": "child"}),
        ]
    )
    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    # Same field key is used for both selection and ordering.
    http_method_field = TelemetryFieldKey(
        name="http.method",
        field_data_type="string",
        field_context="attribute",
    )
    response = make_query_request(
        signoz,
        token,
        start_ms=int((now - timedelta(minutes=5)).timestamp() * 1000),
        end_ms=int(now.timestamp() * 1000),
        request_type="raw",
        queries=[
            _builder_query("A", "operation.type = 'parent'"),
            _builder_query("B", "operation.type = 'child'"),
            TraceOperatorQuery(
                name="C",
                expression="A => B",
                return_spans_from="A",
                limit=100,
                select_fields=[http_method_field],
                order=[OrderBy(key=http_method_field, direction="desc")],
            ).to_dict(),
        ],
    )
    assert response.status_code == HTTPStatus.OK, response.text
    body = response.json()
    assert body["status"] == "success"
    results = body["data"]["data"]["results"]
    assert len(results) == 1
    rows = results[0].get("rows") or []
    assert len(rows) == 2
    # http.method must be present in every row (it is in selectFields)
    for row in rows:
        assert "http.method" in row["data"], f"http.method missing from row: {row['data']}"
    # DESC: POST before GET
    assert rows[0]["data"]["http.method"] == "POST", f"Expected POST first in http.method DESC, got {rows[0]['data']['http.method']}"
    assert rows[1]["data"]["http.method"] == "GET", f"Expected GET second in http.method DESC, got {rows[1]['data']['http.method']}"