Compare commits


7 Commits

Author             SHA1        Message                                                     Date
nityanandagohain   04824cf2f2  fix: lint                                                   2026-05-11 22:06:29 +05:30
Nityananda Gohain  384c649ef8  Merge branch 'main' into issue_4522                         2026-05-11 22:03:43 +05:30
nityanandagohain   68693f8ffd  fix: more updated                                           2026-05-11 22:03:05 +05:30
nityanandagohain   ca1f92f474  fix: get keys after modifying the selectkeys                2026-05-11 21:19:33 +05:30
nityanandagohain   1ed3d8fc8c  fix: minor changes                                          2026-05-11 20:25:05 +05:30
nityanandagohain   196aa301c4  Merge remote-tracking branch 'origin/main' into issue_4522  2026-05-11 15:06:05 +05:30
nityanandagohain   51fcc22d8a  feat: [traces] time aware dynamic field mapper              2026-05-08 18:11:15 +05:30
91 changed files with 2014 additions and 3437 deletions

View File

@@ -18,19 +18,16 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -112,9 +109,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
func(_ licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]] {
return signoz.NewAuditorProviderFactories()
},
func(_ context.Context, _ factory.ProviderSettings, _ flagger.Flagger, _ licensing.Licensing, _ telemetrystore.TelemetryStore, _ retention.Getter, _ organization.Getter, _ zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
return signoz.NewMeterReporterProviderFactories(), "noop"
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
return querier.NewHandler(ps, q, a)
},

View File

@@ -1,55 +0,0 @@
package main
import (
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/telemetrytraces"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
)
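// meterConfigs wires up four meters: a static per-org "platform active" reading,
// plus telemetry-backed size meters for logs and spans and a datapoint-count meter
// for metric samples, each with its signal's default retention.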
var meterConfigs = []metercollector.Config{
{
Provider: metercollector.ProviderStatic,
Static: metercollector.StaticConfig{
Name: zeustypes.MeterPlatformActive,
Unit: zeustypes.MeterUnitCount,
Aggregation: zeustypes.MeterAggregationMax,
Value: 1,
},
},
{
Provider: metercollector.ProviderTelemetry,
Telemetry: metercollector.TelemetryConfig{
Name: zeustypes.MeterLogSize,
Unit: zeustypes.MeterUnitBytes,
Aggregation: zeustypes.MeterAggregationSum,
DBName: telemetrylogs.DBName,
TableName: telemetrylogs.LogsV2LocalTableName,
DefaultRetentionDays: retentiontypes.DefaultLogsRetentionDays,
},
},
{
Provider: metercollector.ProviderTelemetry,
Telemetry: metercollector.TelemetryConfig{
Name: zeustypes.MeterSpanSize,
Unit: zeustypes.MeterUnitBytes,
Aggregation: zeustypes.MeterAggregationSum,
DBName: telemetrytraces.DBName,
TableName: telemetrytraces.SpanIndexV3LocalTableName,
DefaultRetentionDays: retentiontypes.DefaultTracesRetentionDays,
},
},
{
Provider: metercollector.ProviderTelemetry,
Telemetry: metercollector.TelemetryConfig{
Name: zeustypes.MeterDatapointCount,
Unit: zeustypes.MeterUnitCount,
Aggregation: zeustypes.MeterAggregationSum,
DBName: telemetrymetrics.DBName,
TableName: telemetrymetrics.SamplesV4LocalTableName,
DefaultRetentionDays: retentiontypes.DefaultMetricsRetentionDays,
},
},
}

View File

@@ -18,9 +18,6 @@ import (
"github.com/SigNoz/signoz/ee/gateway/httpgateway"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/metercollector/staticmetercollector"
"github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector"
"github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration/implcloudprovider"
"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
@@ -39,17 +36,14 @@ import (
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
pkgflagger "github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
pkgcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -167,20 +161,6 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
}
return factories
},
func(ctx context.Context, providerSettings factory.ProviderSettings, flagger pkgflagger.Flagger, licensing licensing.Licensing, telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter, orgGetter organization.Getter, zeus zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string) {
factories := signoz.NewMeterReporterProviderFactories()
collectorFactories := factory.MustNewNamedMap(
staticmetercollector.NewFactory(),
telemetrymetercollector.NewFactory(telemetryStore, retentionGetter),
)
if err := factories.Add(httpmeterreporter.NewFactory(collectorFactories, meterConfigs, flagger, licensing, orgGetter, zeus)); err != nil {
panic(err)
}
return factories, "http"
},
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
communityHandler := querier.NewHandler(ps, q, a)
return eequerier.NewHandler(ps, q, communityHandler)

View File

@@ -429,10 +429,3 @@ authz:
openfga:
# maximum tuples allowed per openfga write operation.
max_tuples_per_write: 300
##################### Meter Reporter #####################
meterreporter:
# The interval between collection ticks. Minimum 5m.
interval: 6h
# Whether to backfill sealed days from the license creation day.
backfill: true

View File

@@ -1,61 +0,0 @@
// Package staticmetercollector emits a fixed-value meter reading per org per window.
package staticmetercollector
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var _ metercollector.MeterCollector = (*Provider)(nil)
type Provider struct {
settings factory.ScopedProviderSettings
config metercollector.StaticConfig
}
func NewFactory() factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderStatic), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
return newProvider(providerSettings, config.Static), nil
},
)
}
func newProvider(providerSettings factory.ProviderSettings, config metercollector.StaticConfig) *Provider {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/staticmetercollector")
return &Provider{
settings: settings,
config: config,
}
}
func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
return provider.config.Aggregation
}
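// Origin returns the UTC midnight of the license creation day, the earliest day a
// static meter can be backfilled; a missing license or zero creation time yields a
// zero origin.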
func (provider *Provider) Origin(_ context.Context, _ valuer.UUID, license *licensetypes.License, _ time.Time) (time.Time, error) {
if license == nil || license.CreatedAt.IsZero() {
return time.Time{}, nil
}
createdAt := license.CreatedAt.UTC()
return time.Date(createdAt.Year(), createdAt.Month(), createdAt.Day(), 0, 0, 0, 0, time.UTC), nil
}
func (provider *Provider) Collect(_ context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error) {
if license == nil || license.Key == "" {
return nil, nil
}
return []zeustypes.Meter{
zeustypes.NewMeter(provider.config.Name, provider.config.Value, provider.config.Unit, provider.config.Aggregation, window, zeustypes.NewDimensions(zeustypes.OrganizationID.String(orgID.StringValue()))),
}, nil
}

View File

@@ -1,247 +0,0 @@
// Package telemetrymetercollector collects telemetry meters (logs, traces, metrics),
// grouping readings by retention. One Provider is created per TelemetryConfig.
package telemetrymetercollector
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/huandu/go-sqlbuilder"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/telemetrymeter"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
labelKeyPattern = regexp.MustCompile(`^[A-Za-z0-9_.\-]+$`)
labelValuePattern = regexp.MustCompile(`^[A-Za-z0-9_.\-:]+$`)
)
var _ metercollector.MeterCollector = (*Provider)(nil)
type Provider struct {
settings factory.ScopedProviderSettings
config metercollector.TelemetryConfig
telemetryStore telemetrystore.TelemetryStore
retentionGetter retention.Getter
}
func NewFactory(telemetryStore telemetrystore.TelemetryStore, retentionGetter retention.Getter) factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config] {
return factory.NewProviderFactory(factory.MustNewName(metercollector.ProviderTelemetry), func(ctx context.Context, providerSettings factory.ProviderSettings, config metercollector.Config) (metercollector.MeterCollector, error) {
return newProvider(providerSettings, config.Telemetry, telemetryStore, retentionGetter), nil
},
)
}
func newProvider(
providerSettings factory.ProviderSettings,
config metercollector.TelemetryConfig,
telemetryStore telemetrystore.TelemetryStore,
retentionGetter retention.Getter,
) *Provider {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/metercollector/telemetrymetercollector")
return &Provider{
settings: settings,
config: config,
telemetryStore: telemetryStore,
retentionGetter: retentionGetter,
}
}
func (provider *Provider) Name() zeustypes.MeterName { return provider.config.Name }
func (provider *Provider) Unit() zeustypes.MeterUnit { return provider.config.Unit }
func (provider *Provider) Aggregation() zeustypes.MeterAggregation {
return provider.config.Aggregation
}
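// Origin floors the earliest sample timestamp for this meter to UTC midnight; when
// no samples exist yet, it falls back to the start of today so nothing is backfilled.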
func (provider *Provider) Origin(ctx context.Context, _ valuer.UUID, _ *licensetypes.License, todayStart time.Time) (time.Time, error) {
query, args := buildOriginQuery(provider.config.Name.String())
var minMs int64
if err := provider.telemetryStore.ClickhouseDB().QueryRow(ctx, query, args...).Scan(&minMs); err != nil {
return time.Time{}, err
}
if minMs == 0 {
return todayStart, nil
}
minDay := time.UnixMilli(minMs).UTC()
return time.Date(minDay.Year(), minDay.Month(), minDay.Day(), 0, 0, 0, 0, time.UTC), nil
}
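// Collect splits the window into retention-policy segments, sums the meter samples
// per effective retention_days bucket across all segments, and emits one reading per
// bucket.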
func (provider *Provider) Collect(
ctx context.Context,
orgID valuer.UUID,
_ *licensetypes.License,
window zeustypes.MeterWindow,
) ([]zeustypes.Meter, error) {
meterName := provider.config.Name.String()
segments, err := provider.retentionGetter.GetRetentionPolicySegments(
ctx,
orgID,
provider.config.DBName,
provider.config.TableName,
provider.config.DefaultRetentionDays,
window.StartUnixMilli,
window.EndUnixMilli,
)
if err != nil {
return nil, err
}
valuesByRetentionDays := make(map[int]int64)
for _, segment := range segments {
query, args, err := buildQuery(meterName, segment)
if err != nil {
return nil, err
}
rows, err := provider.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
if err != nil {
return nil, err
}
if err := func() error {
defer rows.Close()
for rows.Next() {
var retentionDays int32
var value int64
if err := rows.Scan(&retentionDays, &value); err != nil {
return err
}
valuesByRetentionDays[int(retentionDays)] += value
}
if err := rows.Err(); err != nil {
return err
}
return nil
}(); err != nil {
return nil, err
}
}
meters := make([]zeustypes.Meter, 0, len(valuesByRetentionDays))
for retentionDays, value := range valuesByRetentionDays {
meters = append(meters, zeustypes.NewMeter(provider.config.Name, value, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, retentionDays)))
}
// Empty windows still emit a sentinel so checkpoints can advance.
if len(meters) == 0 && len(segments) > 0 {
meters = append(meters, zeustypes.NewMeter(provider.config.Name, 0, provider.config.Unit, provider.config.Aggregation, window, buildDimensions(orgID, segments[len(segments)-1].DefaultDays)))
}
return meters, nil
}
func buildOriginQuery(meterName string) (string, []any) {
sb := sqlbuilder.NewSelectBuilder()
sb.Select("toInt64(ifNull(min(unix_milli), 0))")
sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
sb.Where(sb.Equal("metric_name", meterName))
return sb.BuildWithFlavor(sqlbuilder.ClickHouse)
}
func buildQuery(meterName string, segment *retentiontypes.RetentionPolicySegment) (string, []any, error) {
retentionExpr, err := buildRetentionMultiIfSQL(segment.Rules, segment.DefaultDays)
if err != nil {
return "", nil, err
}
selects := []string{
retentionExpr + " AS retention_days",
"toInt64(ifNull(sum(value), 0)) AS value",
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select(selects...)
sb.From(telemetrymeter.DBName + "." + telemetrymeter.SamplesTableName)
sb.Where(
sb.Equal("metric_name", meterName),
sb.GTE("unix_milli", segment.StartMs),
sb.LT("unix_milli", segment.EndMs),
)
sb.GroupBy("retention_days")
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return query, args, nil
}
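// buildRetentionMultiIfSQL renders a ClickHouse multiIf(cond1, ttl1, ..., default)
// expression that maps each sample's labels to its effective retention in days; with
// no rules it degenerates to the default as a constant.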
func buildRetentionMultiIfSQL(rules []retentiontypes.CustomRetentionRule, defaultDays int) (string, error) {
if defaultDays <= 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "non-positive default retention %d", defaultDays)
}
if len(rules) == 0 {
return "toInt32(" + strconv.Itoa(defaultDays) + ")", nil
}
arms := make([]string, 0, 2*len(rules)+1)
for ruleIndex, rule := range rules {
if rule.TTLDays <= 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has non-positive ttl_days %d", ruleIndex, rule.TTLDays)
}
conditionExpr, err := buildRuleConditionSQL(ruleIndex, rule)
if err != nil {
return "", err
}
arms = append(arms, conditionExpr)
arms = append(arms, strconv.Itoa(rule.TTLDays))
}
arms = append(arms, strconv.Itoa(defaultDays))
return "toInt32(multiIf(" + strings.Join(arms, ", ") + "))", nil
}
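// buildRuleConditionSQL ANDs one JSONExtractString(labels, key) IN (...) clause per
// filter, validating keys and values against strict character patterns before
// inlining them into SQL.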
func buildRuleConditionSQL(ruleIndex int, rule retentiontypes.CustomRetentionRule) (string, error) {
if len(rule.Filters) == 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d has no filters", ruleIndex)
}
filterExprs := make([]string, 0, len(rule.Filters))
for filterIndex, filter := range rule.Filters {
if !labelKeyPattern.MatchString(filter.Key) {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has invalid key %q", ruleIndex, filterIndex, filter.Key)
}
if len(filter.Values) == 0 {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d has no values", ruleIndex, filterIndex)
}
quoted := make([]string, len(filter.Values))
for valueIndex, value := range filter.Values {
if !labelValuePattern.MatchString(value) {
return "", errors.Newf(errors.TypeInvalidInput, metercollector.ErrCodeMeterCollectorInvalidCustomRetentionRule, "rule %d filter %d value %d is invalid %q", ruleIndex, filterIndex, valueIndex, value)
}
quoted[valueIndex] = "'" + value + "'"
}
filterExprs = append(filterExprs, fmt.Sprintf("JSONExtractString(labels, '%s') IN (%s)", filter.Key, strings.Join(quoted, ", ")))
}
return strings.Join(filterExprs, " AND "), nil
}
func buildDimensions(orgID valuer.UUID, retentionDays int) map[string]string {
retentionDurationSeconds := int64(retentionDays) * 24 * 60 * 60 // seconds
return zeustypes.NewDimensions(
zeustypes.OrganizationID.String(orgID.StringValue()),
zeustypes.RetentionDuration.String(strconv.FormatInt(retentionDurationSeconds, 10)),
)
}

View File

@@ -1,318 +0,0 @@
package httpmeterreporter
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/metercollector"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/trace"
)
var _ factory.ServiceWithHealthy = (*Provider)(nil)
type Provider struct {
settings factory.ScopedProviderSettings
config meterreporter.Config
collectorsByName map[zeustypes.MeterName]metercollector.MeterCollector
flagger flagger.Flagger
licensing licensing.Licensing
orgGetter organization.Getter
zeus zeus.Zeus
healthyC chan struct{}
stopC chan struct{}
metrics *reporterMetrics
}
func NewFactory(collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]], collectorConfigs []metercollector.Config, flagger flagger.Flagger, licensing licensing.Licensing, orgGetter organization.Getter, zeus zeus.Zeus) factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, providerSettings factory.ProviderSettings, config meterreporter.Config) (meterreporter.Reporter, error) {
return newProvider(ctx, providerSettings, config, collectorFactories, collectorConfigs, flagger, licensing, orgGetter, zeus)
},
)
}
func newProvider(
ctx context.Context,
providerSettings factory.ProviderSettings,
config meterreporter.Config,
collectorFactories factory.NamedMap[factory.ProviderFactory[metercollector.MeterCollector, metercollector.Config]],
collectorConfigs []metercollector.Config,
flagger flagger.Flagger,
licensing licensing.Licensing,
orgGetter organization.Getter,
zeus zeus.Zeus,
) (*Provider, error) {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/meterreporter/httpmeterreporter")
collectorsByName := map[zeustypes.MeterName]metercollector.MeterCollector{}
for _, collectorConfig := range collectorConfigs {
collector, err := factory.NewProviderFromNamedMap(ctx, providerSettings, collectorConfig, collectorFactories, collectorConfig.Provider)
if err != nil {
return nil, err
}
if _, exists := collectorsByName[collector.Name()]; exists {
return nil, errors.Newf(errors.TypeAlreadyExists, errors.CodeAlreadyExists, "duplicate meter collector %q", collector.Name())
}
collectorsByName[collector.Name()] = collector
}
metrics, err := newReporterMetrics(settings.Meter())
if err != nil {
return nil, err
}
return &Provider{
settings: settings,
config: config,
collectorsByName: collectorsByName,
flagger: flagger,
licensing: licensing,
orgGetter: orgGetter,
zeus: zeus,
healthyC: make(chan struct{}),
stopC: make(chan struct{}),
metrics: metrics,
}, nil
}
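// Start reports healthy immediately, runs one collection up front, then collects on
// every interval tick until Stop closes stopC.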
func (provider *Provider) Start(ctx context.Context) error {
close(provider.healthyC)
provider.collect(ctx)
ticker := time.NewTicker(provider.config.Interval)
defer ticker.Stop()
for {
select {
case <-provider.stopC:
return nil
case <-ticker.C:
provider.collect(ctx)
}
}
}
func (provider *Provider) collect(ctx context.Context) {
ctx, span := provider.settings.Tracer().Start(ctx, "meterreporter.Collect", trace.WithAttributes(attribute.String("meterreporter.provider", "http")))
defer span.End()
orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
if err != nil {
span.RecordError(err)
provider.settings.Logger().ErrorContext(ctx, "failed to get orgs data", errors.Attr(err))
return
}
for _, org := range orgs {
evalCtx := featuretypes.NewFlaggerEvaluationContext(org.ID)
if !provider.flagger.BooleanOrEmpty(ctx, flagger.FeatureUseMeterReporter, evalCtx) {
provider.settings.Logger().DebugContext(ctx, "meter reporter disabled for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
continue
}
license, err := provider.licensing.GetActive(ctx, org.ID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
provider.settings.Logger().DebugContext(ctx, "no active license found for org, skipping reporting", slog.String("org_id", org.ID.StringValue()))
continue
}
span.RecordError(err)
provider.settings.Logger().ErrorContext(ctx, "failed to fetch active license for org", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
return
}
if err := provider.collectOrg(ctx, org, license); err != nil {
span.RecordError(err)
provider.settings.Logger().ErrorContext(ctx, "failed to collect meters", errors.Attr(err), slog.String("org_id", org.ID.StringValue()))
}
}
}
func (provider *Provider) Stop(ctx context.Context) error {
close(provider.stopC)
return nil
}
func (provider *Provider) Healthy() <-chan struct{} {
return provider.healthyC
}
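// collectOrg backfills sealed whole days (yesterday and earlier) from per-meter
// checkpoints when backfill is enabled, then reports today's partial window with
// every collector.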
func (provider *Provider) collectOrg(ctx context.Context, org *types.Organization, license *licensetypes.License) error {
now := time.Now().UTC()
// Use one timestamp so a tick cannot straddle midnight.
todayStart := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)
if provider.config.Backfill {
checkpointsByMeter, err := provider.checkpoints(ctx, license.Key)
if err != nil {
return err
}
nextByCollector := provider.nextDays(license, todayStart, checkpointsByMeter)
start, end, ok := backfillRange(nextByCollector, todayStart)
if ok {
for day := start; !day.After(end); day = day.AddDate(0, 0, 1) {
eligible := eligibleCollectors(provider.collectorsByName, nextByCollector, day)
if len(eligible) == 0 {
continue
}
window, err := zeustypes.NewMeterWindow(day.UnixMilli(), day.AddDate(0, 0, 1).UnixMilli(), true)
if err != nil {
return err
}
if err := provider.report(ctx, org.ID, license, window, eligible); err != nil {
provider.settings.Logger().WarnContext(ctx, "failed to backfill for day", errors.Attr(err), slog.String("date", day.Format("2006-01-02")))
return err
}
}
}
}
// Today's partial window: every collector is always eligible (next <= today).
if now.UnixMilli() > todayStart.UnixMilli() {
todayWindow, err := zeustypes.NewMeterWindow(todayStart.UnixMilli(), now.UnixMilli(), false)
if err != nil {
return err
}
return provider.report(ctx, org.ID, license, todayWindow, provider.collectorsByName)
}
return nil
}
func (provider *Provider) checkpoints(ctx context.Context, licenseKey string) (map[string]time.Time, error) {
list, err := provider.zeus.ListMeterCheckpoints(ctx, licenseKey)
if err != nil {
provider.metrics.checkpoints.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
return nil, err
}
provider.metrics.checkpoints.Add(ctx, 1)
checkpointsByMeter := make(map[string]time.Time, len(list))
for _, checkpoint := range list {
checkpointsByMeter[checkpoint.Name] = checkpoint.StartDate.UTC()
}
return checkpointsByMeter, nil
}
func (provider *Provider) nextDays(license *licensetypes.License, todayStart time.Time, checkpointsByMeter map[string]time.Time) map[zeustypes.MeterName]time.Time {
nextByCollector := make(map[zeustypes.MeterName]time.Time, len(provider.collectorsByName))
licenseCreatedAt := license.CreatedAt.UTC()
licenseCreatedAtDay := time.Date(licenseCreatedAt.Year(), licenseCreatedAt.Month(), licenseCreatedAt.Day(), 0, 0, 0, 0, time.UTC)
for _, collector := range provider.collectorsByName {
checkpoint, hasCheckpoint := checkpointsByMeter[collector.Name().String()]
nextByCollector[collector.Name()] = nextReportableDay(licenseCreatedAtDay, todayStart, checkpoint, hasCheckpoint)
}
return nextByCollector
}
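// nextReportableDay is the later of the license-creation day and the day after the
// checkpoint; with no checkpoint it is the creation day, or today when that is zero.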
func nextReportableDay(licenseCreatedAtDay time.Time, todayStart time.Time, checkpoint time.Time, hasCheckpoint bool) time.Time {
next := licenseCreatedAtDay
if next.IsZero() {
next = todayStart
}
if hasCheckpoint {
checkpointNext := checkpoint.AddDate(0, 0, 1)
if checkpointNext.After(next) {
next = checkpointNext
}
}
return next
}
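// report runs each eligible collector for the window and ships the combined readings
// to Zeus in one PutMetersV3 call, keyed by the window's start date for idempotency.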
func (provider *Provider) report(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow, collectors map[zeustypes.MeterName]metercollector.MeterCollector) error {
date := time.UnixMilli(window.StartUnixMilli).UTC().Format("2006-01-02")
meters := make([]zeustypes.Meter, 0, len(collectors))
for _, collector := range collectors {
meterAttr := attribute.String("signoz.meter.name", collector.Name().String())
collectedReadings, err := collector.Collect(ctx, orgID, license, window)
if err != nil {
provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr, errors.TypeAttr(err)))
continue
}
provider.metrics.collections.Add(ctx, 1, metric.WithAttributes(meterAttr))
meters = append(meters, collectedReadings...)
}
if len(meters) == 0 {
return nil
}
idempotencyKey := fmt.Sprintf("meterreporter:%s", date)
body, err := json.Marshal(meters)
if err != nil {
provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
return err
}
if err := provider.zeus.PutMetersV3(ctx, license.Key, idempotencyKey, body); err != nil {
provider.metrics.reports.Add(ctx, 1, metric.WithAttributes(errors.TypeAttr(err)))
return err
}
provider.metrics.reports.Add(ctx, 1)
provider.metrics.meters.Add(ctx, int64(len(meters)))
return nil
}
// backfillRange returns the inclusive sealed-day range ending at yesterday.
func backfillRange(nextByCollector map[zeustypes.MeterName]time.Time, todayStart time.Time) (start, end time.Time, ok bool) {
yesterday := todayStart.AddDate(0, 0, -1)
for _, next := range nextByCollector {
if !next.Before(todayStart) {
continue
}
if start.IsZero() || next.Before(start) {
start = next
}
}
if start.IsZero() || start.After(yesterday) {
return time.Time{}, time.Time{}, false
}
return start, yesterday, true
}
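// eligibleCollectors keeps only the collectors whose next reportable day is on or
// before the given day.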
func eligibleCollectors(collectors map[zeustypes.MeterName]metercollector.MeterCollector, nextByCollector map[zeustypes.MeterName]time.Time, day time.Time) map[zeustypes.MeterName]metercollector.MeterCollector {
eligible := make(map[zeustypes.MeterName]metercollector.MeterCollector, len(collectors))
for name, collector := range collectors {
if !nextByCollector[name].After(day) {
eligible[name] = collector
}
}
return eligible
}

View File

@@ -1,48 +0,0 @@
package httpmeterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"go.opentelemetry.io/otel/metric"
)
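// reporterMetrics counts checkpoint fetches, report shipments, per-meter collect
// calls, and shipped meter readings; failed operations carry an error-type attribute.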
type reporterMetrics struct {
checkpoints metric.Int64Counter
reports metric.Int64Counter
collections metric.Int64Counter
meters metric.Int64Counter
}
func newReporterMetrics(meter metric.Meter) (*reporterMetrics, error) {
var errs error
checkpoints, err := meter.Int64Counter("signoz.meterreporter.checkpoints", metric.WithDescription("Zeus meter checkpoint fetches."), metric.WithUnit("{checkpoint}"))
if err != nil {
errs = errors.Join(errs, err)
}
reports, err := meter.Int64Counter("signoz.meterreporter.reports", metric.WithDescription("Meter reports shipped to Zeus."), metric.WithUnit("{report}"))
if err != nil {
errs = errors.Join(errs, err)
}
collections, err := meter.Int64Counter("signoz.meterreporter.collections", metric.WithDescription("Per-meter collect calls."), metric.WithUnit("{collection}"))
if err != nil {
errs = errors.Join(errs, err)
}
meters, err := meter.Int64Counter("signoz.meterreporter.meters", metric.WithDescription("Meter readings shipped to Zeus."), metric.WithUnit("{meter}"))
if err != nil {
errs = errors.Join(errs, err)
}
if errs != nil {
return nil, errs
}
return &reporterMetrics{
checkpoints: checkpoints,
reports: reports,
collections: collections,
meters: meters,
}, nil
}

View File

@@ -150,72 +150,6 @@ func (provider *Provider) PutMetersV2(ctx context.Context, key string, data []by
return err
}
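// PutMetersV3 posts the meter payload to /v2/meters, attaching X-Idempotency-Key
// when provided so a retried daily report can be deduplicated upstream.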
func (provider *Provider) PutMetersV3(ctx context.Context, key string, idempotencyKey string, data []byte) error {
headers := http.Header{}
if idempotencyKey != "" {
headers.Set("X-Idempotency-Key", idempotencyKey)
}
_, err := provider.doWithHeaders(
ctx,
provider.config.URL.JoinPath("/v2/meters"),
http.MethodPost,
key,
data,
headers,
)
return err
}
func (provider *Provider) ListMeterCheckpoints(ctx context.Context, key string) ([]zeustypes.MeterCheckpoint, error) {
response, err := provider.do(
ctx,
provider.config.URL.JoinPath("/v2/meters/checkpoints"),
http.MethodGet,
key,
nil,
)
if err != nil {
return nil, err
}
checkpointValues := gjson.GetBytes(response, "data")
if !checkpointValues.Exists() || checkpointValues.Type == gjson.Null {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints are required")
}
if !checkpointValues.IsArray() {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoints must be an array")
}
checkpointResults := checkpointValues.Array()
checkpoints := make([]zeustypes.MeterCheckpoint, 0, len(checkpointResults))
for _, checkpointValue := range checkpointResults {
name := checkpointValue.Get("name").String()
if name == "" {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint name is required")
}
startDateString := checkpointValue.Get("start_date").String()
if startDateString == "" {
return nil, errors.Newf(errors.TypeInternal, zeus.ErrCodeResponseMalformed, "meter checkpoint start_date is required for %q", name)
}
startDate, err := time.Parse("2006-01-02", startDateString)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, zeus.ErrCodeResponseMalformed, "parse meter checkpoint start_date %q for %q", startDateString, name)
}
checkpoints = append(checkpoints, zeustypes.MeterCheckpoint{
Name: name,
StartDate: startDate,
})
}
return checkpoints, nil
}
func (provider *Provider) PutProfile(ctx context.Context, key string, profile *zeustypes.PostableProfile) error {
body, err := json.Marshal(profile)
if err != nil {
@@ -251,21 +185,12 @@ func (provider *Provider) PutHost(ctx context.Context, key string, host *zeustyp
}
func (provider *Provider) do(ctx context.Context, url *url.URL, method string, key string, requestBody []byte) ([]byte, error) {
return provider.doWithHeaders(ctx, url, method, key, requestBody, nil)
}
func (provider *Provider) doWithHeaders(ctx context.Context, url *url.URL, method string, key string, requestBody []byte, extraHeaders http.Header) ([]byte, error) {
request, err := http.NewRequestWithContext(ctx, method, url.String(), bytes.NewBuffer(requestBody))
if err != nil {
return nil, err
}
request.Header.Set("X-Signoz-Cloud-Api-Key", key)
request.Header.Set("Content-Type", "application/json")
for k, vs := range extraHeaders {
for _, v := range vs {
request.Header.Add(k, v)
}
}
response, err := provider.httpClient.Do(request)
if err != nil {

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

View File

@@ -1,4 +1,4 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
// noinspection ES6UnusedImports,JSUnusedGlobalSymbols,JSUnusedLocalSymbols
import {
ATN,
@@ -38,13 +38,12 @@ export default class FilterQueryLexer extends Lexer {
public static readonly HAS = 24;
public static readonly HASANY = 25;
public static readonly HASALL = 26;
public static readonly SEARCH = 27;
public static readonly BOOL = 28;
public static readonly NUMBER = 29;
public static readonly QUOTED_TEXT = 30;
public static readonly KEY = 31;
public static readonly WS = 32;
public static readonly FREETEXT = 33;
public static readonly BOOL = 27;
public static readonly NUMBER = 28;
public static readonly QUOTED_TEXT = 29;
public static readonly KEY = 30;
public static readonly WS = 31;
public static readonly FREETEXT = 32;
public static readonly EOF = Token.EOF;
public static readonly channelNames: string[] = [ "DEFAULT_TOKEN_CHANNEL", "HIDDEN" ];
@@ -69,9 +68,8 @@ export default class FilterQueryLexer extends Lexer {
"AND", "OR",
"HASTOKEN",
"HAS", "HASANY",
"HASALL", "SEARCH",
"BOOL", "NUMBER",
"QUOTED_TEXT",
"HASALL", "BOOL",
"NUMBER", "QUOTED_TEXT",
"KEY", "WS",
"FREETEXT" ];
public static readonly modeNames: string[] = [ "DEFAULT_MODE", ];
@@ -80,8 +78,8 @@ export default class FilterQueryLexer extends Lexer {
"LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS", "REGEXP",
"CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY", "HASALL",
"SEARCH", "BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT", "EMPTY_BRACKS",
"OLD_JSON_BRACKS", "KEY", "WS", "DIGIT", "FREETEXT",
"BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT", "EMPTY_BRACKS", "OLD_JSON_BRACKS",
"KEY", "WS", "DIGIT", "FREETEXT",
];
@@ -102,122 +100,119 @@ export default class FilterQueryLexer extends Lexer {
public get modeNames(): string[] { return FilterQueryLexer.modeNames; }
public static readonly _serializedATN: number[] = [4,0,33,329,6,-1,2,0,
public static readonly _serializedATN: number[] = [4,0,32,320,6,-1,2,0,
7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,6,2,7,7,7,2,8,7,8,2,9,
7,9,2,10,7,10,2,11,7,11,2,12,7,12,2,13,7,13,2,14,7,14,2,15,7,15,2,16,7,
16,2,17,7,17,2,18,7,18,2,19,7,19,2,20,7,20,2,21,7,21,2,22,7,22,2,23,7,23,
2,24,7,24,2,25,7,25,2,26,7,26,2,27,7,27,2,28,7,28,2,29,7,29,2,30,7,30,2,
31,7,31,2,32,7,32,2,33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,2,37,7,37,1,0,
1,0,1,1,1,1,1,2,1,2,1,3,1,3,1,4,1,4,1,5,1,5,1,5,3,5,91,8,5,1,6,1,6,1,6,
1,7,1,7,1,7,1,8,1,8,1,9,1,9,1,9,1,10,1,10,1,11,1,11,1,11,1,12,1,12,1,12,
1,12,1,12,1,13,1,13,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,1,14,1,14,1,
14,1,14,1,15,1,15,1,15,1,15,1,15,1,15,3,15,134,8,15,1,16,1,16,1,16,1,16,
1,16,1,16,1,16,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,3,17,151,8,17,1,
18,1,18,1,18,1,19,1,19,1,19,1,19,1,20,1,20,1,20,1,20,1,21,1,21,1,21,1,22,
1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,22,1,23,1,23,1,23,1,23,1,24,1,24,1,
24,1,24,1,24,1,24,1,24,1,25,1,25,1,25,1,25,1,25,1,25,1,25,1,26,1,26,1,26,
1,26,1,26,1,26,1,26,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,1,27,3,27,210,
8,27,1,28,1,28,1,29,3,29,215,8,29,1,29,4,29,218,8,29,11,29,12,29,219,1,
29,1,29,5,29,224,8,29,10,29,12,29,227,9,29,3,29,229,8,29,1,29,1,29,3,29,
233,8,29,1,29,4,29,236,8,29,11,29,12,29,237,3,29,240,8,29,1,29,3,29,243,
8,29,1,29,1,29,4,29,247,8,29,11,29,12,29,248,1,29,1,29,3,29,253,8,29,1,
29,4,29,256,8,29,11,29,12,29,257,3,29,260,8,29,3,29,262,8,29,1,30,1,30,
1,30,1,30,5,30,268,8,30,10,30,12,30,271,9,30,1,30,1,30,1,30,1,30,1,30,5,
30,278,8,30,10,30,12,30,281,9,30,1,30,3,30,284,8,30,1,31,1,31,5,31,288,
8,31,10,31,12,31,291,9,31,1,32,1,32,1,32,1,33,1,33,1,33,1,33,1,34,1,34,
1,34,1,34,1,34,1,34,1,34,4,34,307,8,34,11,34,12,34,308,5,34,311,8,34,10,
34,12,34,314,9,34,1,35,4,35,317,8,35,11,35,12,35,318,1,35,1,35,1,36,1,36,
1,37,4,37,326,8,37,11,37,12,37,327,0,0,38,1,1,3,2,5,3,7,4,9,5,11,6,13,7,
15,8,17,9,19,10,21,11,23,12,25,13,27,14,29,15,31,16,33,17,35,18,37,19,39,
20,41,21,43,22,45,23,47,24,49,25,51,26,53,27,55,28,57,0,59,29,61,30,63,
0,65,0,67,0,69,31,71,32,73,0,75,33,1,0,29,2,0,76,76,108,108,2,0,73,73,105,
105,2,0,75,75,107,107,2,0,69,69,101,101,2,0,66,66,98,98,2,0,84,84,116,116,
2,0,87,87,119,119,2,0,78,78,110,110,2,0,88,88,120,120,2,0,83,83,115,115,
2,0,82,82,114,114,2,0,71,71,103,103,2,0,80,80,112,112,2,0,67,67,99,99,2,
0,79,79,111,111,2,0,65,65,97,97,2,0,68,68,100,100,2,0,72,72,104,104,2,0,
89,89,121,121,2,0,85,85,117,117,2,0,70,70,102,102,2,0,43,43,45,45,2,0,34,
34,92,92,2,0,39,39,92,92,4,0,35,36,64,90,95,95,97,123,7,0,35,36,45,45,47,
58,64,90,95,95,97,123,125,125,3,0,9,10,13,13,32,32,1,0,48,57,8,0,9,10,13,
13,32,34,39,41,44,44,60,62,91,91,93,93,353,0,1,1,0,0,0,0,3,1,0,0,0,0,5,
1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,11,1,0,0,0,0,13,1,0,0,0,0,15,1,0,0,0,
0,17,1,0,0,0,0,19,1,0,0,0,0,21,1,0,0,0,0,23,1,0,0,0,0,25,1,0,0,0,0,27,1,
0,0,0,0,29,1,0,0,0,0,31,1,0,0,0,0,33,1,0,0,0,0,35,1,0,0,0,0,37,1,0,0,0,
0,39,1,0,0,0,0,41,1,0,0,0,0,43,1,0,0,0,0,45,1,0,0,0,0,47,1,0,0,0,0,49,1,
0,0,0,0,51,1,0,0,0,0,53,1,0,0,0,0,55,1,0,0,0,0,59,1,0,0,0,0,61,1,0,0,0,
0,69,1,0,0,0,0,71,1,0,0,0,0,75,1,0,0,0,1,77,1,0,0,0,3,79,1,0,0,0,5,81,1,
0,0,0,7,83,1,0,0,0,9,85,1,0,0,0,11,90,1,0,0,0,13,92,1,0,0,0,15,95,1,0,0,
0,17,98,1,0,0,0,19,100,1,0,0,0,21,103,1,0,0,0,23,105,1,0,0,0,25,108,1,0,
0,0,27,113,1,0,0,0,29,119,1,0,0,0,31,127,1,0,0,0,33,135,1,0,0,0,35,142,
1,0,0,0,37,152,1,0,0,0,39,155,1,0,0,0,41,159,1,0,0,0,43,163,1,0,0,0,45,
166,1,0,0,0,47,175,1,0,0,0,49,179,1,0,0,0,51,186,1,0,0,0,53,193,1,0,0,0,
55,209,1,0,0,0,57,211,1,0,0,0,59,261,1,0,0,0,61,283,1,0,0,0,63,285,1,0,
0,0,65,292,1,0,0,0,67,295,1,0,0,0,69,299,1,0,0,0,71,316,1,0,0,0,73,322,
1,0,0,0,75,325,1,0,0,0,77,78,5,40,0,0,78,2,1,0,0,0,79,80,5,41,0,0,80,4,
1,0,0,0,81,82,5,91,0,0,82,6,1,0,0,0,83,84,5,93,0,0,84,8,1,0,0,0,85,86,5,
44,0,0,86,10,1,0,0,0,87,91,5,61,0,0,88,89,5,61,0,0,89,91,5,61,0,0,90,87,
1,0,0,0,90,88,1,0,0,0,91,12,1,0,0,0,92,93,5,33,0,0,93,94,5,61,0,0,94,14,
1,0,0,0,95,96,5,60,0,0,96,97,5,62,0,0,97,16,1,0,0,0,98,99,5,60,0,0,99,18,
1,0,0,0,100,101,5,60,0,0,101,102,5,61,0,0,102,20,1,0,0,0,103,104,5,62,0,
0,104,22,1,0,0,0,105,106,5,62,0,0,106,107,5,61,0,0,107,24,1,0,0,0,108,109,
7,0,0,0,109,110,7,1,0,0,110,111,7,2,0,0,111,112,7,3,0,0,112,26,1,0,0,0,
113,114,7,1,0,0,114,115,7,0,0,0,115,116,7,1,0,0,116,117,7,2,0,0,117,118,
7,3,0,0,118,28,1,0,0,0,119,120,7,4,0,0,120,121,7,3,0,0,121,122,7,5,0,0,
122,123,7,6,0,0,123,124,7,3,0,0,124,125,7,3,0,0,125,126,7,7,0,0,126,30,
1,0,0,0,127,128,7,3,0,0,128,129,7,8,0,0,129,130,7,1,0,0,130,131,7,9,0,0,
131,133,7,5,0,0,132,134,7,9,0,0,133,132,1,0,0,0,133,134,1,0,0,0,134,32,
1,0,0,0,135,136,7,10,0,0,136,137,7,3,0,0,137,138,7,11,0,0,138,139,7,3,0,
0,139,140,7,8,0,0,140,141,7,12,0,0,141,34,1,0,0,0,142,143,7,13,0,0,143,
144,7,14,0,0,144,145,7,7,0,0,145,146,7,5,0,0,146,147,7,15,0,0,147,148,7,
1,0,0,148,150,7,7,0,0,149,151,7,9,0,0,150,149,1,0,0,0,150,151,1,0,0,0,151,
36,1,0,0,0,152,153,7,1,0,0,153,154,7,7,0,0,154,38,1,0,0,0,155,156,7,7,0,
0,156,157,7,14,0,0,157,158,7,5,0,0,158,40,1,0,0,0,159,160,7,15,0,0,160,
161,7,7,0,0,161,162,7,16,0,0,162,42,1,0,0,0,163,164,7,14,0,0,164,165,7,
10,0,0,165,44,1,0,0,0,166,167,7,17,0,0,167,168,7,15,0,0,168,169,7,9,0,0,
169,170,7,5,0,0,170,171,7,14,0,0,171,172,7,2,0,0,172,173,7,3,0,0,173,174,
7,7,0,0,174,46,1,0,0,0,175,176,7,17,0,0,176,177,7,15,0,0,177,178,7,9,0,
0,178,48,1,0,0,0,179,180,7,17,0,0,180,181,7,15,0,0,181,182,7,9,0,0,182,
183,7,15,0,0,183,184,7,7,0,0,184,185,7,18,0,0,185,50,1,0,0,0,186,187,7,
17,0,0,187,188,7,15,0,0,188,189,7,9,0,0,189,190,7,15,0,0,190,191,7,0,0,
0,191,192,7,0,0,0,192,52,1,0,0,0,193,194,7,9,0,0,194,195,7,3,0,0,195,196,
7,15,0,0,196,197,7,10,0,0,197,198,7,13,0,0,198,199,7,17,0,0,199,54,1,0,
0,0,200,201,7,5,0,0,201,202,7,10,0,0,202,203,7,19,0,0,203,210,7,3,0,0,204,
205,7,20,0,0,205,206,7,15,0,0,206,207,7,0,0,0,207,208,7,9,0,0,208,210,7,
3,0,0,209,200,1,0,0,0,209,204,1,0,0,0,210,56,1,0,0,0,211,212,7,21,0,0,212,
58,1,0,0,0,213,215,3,57,28,0,214,213,1,0,0,0,214,215,1,0,0,0,215,217,1,
0,0,0,216,218,3,73,36,0,217,216,1,0,0,0,218,219,1,0,0,0,219,217,1,0,0,0,
219,220,1,0,0,0,220,228,1,0,0,0,221,225,5,46,0,0,222,224,3,73,36,0,223,
222,1,0,0,0,224,227,1,0,0,0,225,223,1,0,0,0,225,226,1,0,0,0,226,229,1,0,
0,0,227,225,1,0,0,0,228,221,1,0,0,0,228,229,1,0,0,0,229,239,1,0,0,0,230,
232,7,3,0,0,231,233,3,57,28,0,232,231,1,0,0,0,232,233,1,0,0,0,233,235,1,
0,0,0,234,236,3,73,36,0,235,234,1,0,0,0,236,237,1,0,0,0,237,235,1,0,0,0,
237,238,1,0,0,0,238,240,1,0,0,0,239,230,1,0,0,0,239,240,1,0,0,0,240,262,
1,0,0,0,241,243,3,57,28,0,242,241,1,0,0,0,242,243,1,0,0,0,243,244,1,0,0,
0,244,246,5,46,0,0,245,247,3,73,36,0,246,245,1,0,0,0,247,248,1,0,0,0,248,
246,1,0,0,0,248,249,1,0,0,0,249,259,1,0,0,0,250,252,7,3,0,0,251,253,3,57,
28,0,252,251,1,0,0,0,252,253,1,0,0,0,253,255,1,0,0,0,254,256,3,73,36,0,
255,254,1,0,0,0,256,257,1,0,0,0,257,255,1,0,0,0,257,258,1,0,0,0,258,260,
1,0,0,0,259,250,1,0,0,0,259,260,1,0,0,0,260,262,1,0,0,0,261,214,1,0,0,0,
261,242,1,0,0,0,262,60,1,0,0,0,263,269,5,34,0,0,264,268,8,22,0,0,265,266,
5,92,0,0,266,268,9,0,0,0,267,264,1,0,0,0,267,265,1,0,0,0,268,271,1,0,0,
0,269,267,1,0,0,0,269,270,1,0,0,0,270,272,1,0,0,0,271,269,1,0,0,0,272,284,
5,34,0,0,273,279,5,39,0,0,274,278,8,23,0,0,275,276,5,92,0,0,276,278,9,0,
0,0,277,274,1,0,0,0,277,275,1,0,0,0,278,281,1,0,0,0,279,277,1,0,0,0,279,
280,1,0,0,0,280,282,1,0,0,0,281,279,1,0,0,0,282,284,5,39,0,0,283,263,1,
0,0,0,283,273,1,0,0,0,284,62,1,0,0,0,285,289,7,24,0,0,286,288,7,25,0,0,
287,286,1,0,0,0,288,291,1,0,0,0,289,287,1,0,0,0,289,290,1,0,0,0,290,64,
1,0,0,0,291,289,1,0,0,0,292,293,5,91,0,0,293,294,5,93,0,0,294,66,1,0,0,
0,295,296,5,91,0,0,296,297,5,42,0,0,297,298,5,93,0,0,298,68,1,0,0,0,299,
312,3,63,31,0,300,301,5,46,0,0,301,311,3,63,31,0,302,311,3,65,32,0,303,
311,3,67,33,0,304,306,5,46,0,0,305,307,3,73,36,0,306,305,1,0,0,0,307,308,
1,0,0,0,308,306,1,0,0,0,308,309,1,0,0,0,309,311,1,0,0,0,310,300,1,0,0,0,
310,302,1,0,0,0,310,303,1,0,0,0,310,304,1,0,0,0,311,314,1,0,0,0,312,310,
1,0,0,0,312,313,1,0,0,0,313,70,1,0,0,0,314,312,1,0,0,0,315,317,7,26,0,0,
316,315,1,0,0,0,317,318,1,0,0,0,318,316,1,0,0,0,318,319,1,0,0,0,319,320,
1,0,0,0,320,321,6,35,0,0,321,72,1,0,0,0,322,323,7,27,0,0,323,74,1,0,0,0,
324,326,8,28,0,0,325,324,1,0,0,0,326,327,1,0,0,0,327,325,1,0,0,0,327,328,
1,0,0,0,328,76,1,0,0,0,29,0,90,133,150,209,214,219,225,228,232,237,239,
242,248,252,257,259,261,267,269,277,279,283,289,308,310,312,318,327,1,6,
0,0];
31,7,31,2,32,7,32,2,33,7,33,2,34,7,34,2,35,7,35,2,36,7,36,1,0,1,0,1,1,1,
1,1,2,1,2,1,3,1,3,1,4,1,4,1,5,1,5,1,5,3,5,89,8,5,1,6,1,6,1,6,1,7,1,7,1,
7,1,8,1,8,1,9,1,9,1,9,1,10,1,10,1,11,1,11,1,11,1,12,1,12,1,12,1,12,1,12,
1,13,1,13,1,13,1,13,1,13,1,13,1,14,1,14,1,14,1,14,1,14,1,14,1,14,1,14,1,
15,1,15,1,15,1,15,1,15,1,15,3,15,132,8,15,1,16,1,16,1,16,1,16,1,16,1,16,
1,16,1,17,1,17,1,17,1,17,1,17,1,17,1,17,1,17,3,17,149,8,17,1,18,1,18,1,
18,1,19,1,19,1,19,1,19,1,20,1,20,1,20,1,20,1,21,1,21,1,21,1,22,1,22,1,22,
1,22,1,22,1,22,1,22,1,22,1,22,1,23,1,23,1,23,1,23,1,24,1,24,1,24,1,24,1,
24,1,24,1,24,1,25,1,25,1,25,1,25,1,25,1,25,1,25,1,26,1,26,1,26,1,26,1,26,
1,26,1,26,1,26,1,26,3,26,201,8,26,1,27,1,27,1,28,3,28,206,8,28,1,28,4,28,
209,8,28,11,28,12,28,210,1,28,1,28,5,28,215,8,28,10,28,12,28,218,9,28,3,
28,220,8,28,1,28,1,28,3,28,224,8,28,1,28,4,28,227,8,28,11,28,12,28,228,
3,28,231,8,28,1,28,3,28,234,8,28,1,28,1,28,4,28,238,8,28,11,28,12,28,239,
1,28,1,28,3,28,244,8,28,1,28,4,28,247,8,28,11,28,12,28,248,3,28,251,8,28,
3,28,253,8,28,1,29,1,29,1,29,1,29,5,29,259,8,29,10,29,12,29,262,9,29,1,
29,1,29,1,29,1,29,1,29,5,29,269,8,29,10,29,12,29,272,9,29,1,29,3,29,275,
8,29,1,30,1,30,5,30,279,8,30,10,30,12,30,282,9,30,1,31,1,31,1,31,1,32,1,
32,1,32,1,32,1,33,1,33,1,33,1,33,1,33,1,33,1,33,4,33,298,8,33,11,33,12,
33,299,5,33,302,8,33,10,33,12,33,305,9,33,1,34,4,34,308,8,34,11,34,12,34,
309,1,34,1,34,1,35,1,35,1,36,4,36,317,8,36,11,36,12,36,318,0,0,37,1,1,3,
2,5,3,7,4,9,5,11,6,13,7,15,8,17,9,19,10,21,11,23,12,25,13,27,14,29,15,31,
16,33,17,35,18,37,19,39,20,41,21,43,22,45,23,47,24,49,25,51,26,53,27,55,
0,57,28,59,29,61,0,63,0,65,0,67,30,69,31,71,0,73,32,1,0,29,2,0,76,76,108,
108,2,0,73,73,105,105,2,0,75,75,107,107,2,0,69,69,101,101,2,0,66,66,98,
98,2,0,84,84,116,116,2,0,87,87,119,119,2,0,78,78,110,110,2,0,88,88,120,
120,2,0,83,83,115,115,2,0,82,82,114,114,2,0,71,71,103,103,2,0,80,80,112,
112,2,0,67,67,99,99,2,0,79,79,111,111,2,0,65,65,97,97,2,0,68,68,100,100,
2,0,72,72,104,104,2,0,89,89,121,121,2,0,85,85,117,117,2,0,70,70,102,102,
2,0,43,43,45,45,2,0,34,34,92,92,2,0,39,39,92,92,4,0,35,36,64,90,95,95,97,
123,7,0,35,36,45,45,47,58,64,90,95,95,97,123,125,125,3,0,9,10,13,13,32,
32,1,0,48,57,8,0,9,10,13,13,32,34,39,41,44,44,60,62,91,91,93,93,344,0,1,
1,0,0,0,0,3,1,0,0,0,0,5,1,0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,11,1,0,0,0,0,
13,1,0,0,0,0,15,1,0,0,0,0,17,1,0,0,0,0,19,1,0,0,0,0,21,1,0,0,0,0,23,1,0,
0,0,0,25,1,0,0,0,0,27,1,0,0,0,0,29,1,0,0,0,0,31,1,0,0,0,0,33,1,0,0,0,0,
35,1,0,0,0,0,37,1,0,0,0,0,39,1,0,0,0,0,41,1,0,0,0,0,43,1,0,0,0,0,45,1,0,
0,0,0,47,1,0,0,0,0,49,1,0,0,0,0,51,1,0,0,0,0,53,1,0,0,0,0,57,1,0,0,0,0,
59,1,0,0,0,0,67,1,0,0,0,0,69,1,0,0,0,0,73,1,0,0,0,1,75,1,0,0,0,3,77,1,0,
0,0,5,79,1,0,0,0,7,81,1,0,0,0,9,83,1,0,0,0,11,88,1,0,0,0,13,90,1,0,0,0,
15,93,1,0,0,0,17,96,1,0,0,0,19,98,1,0,0,0,21,101,1,0,0,0,23,103,1,0,0,0,
25,106,1,0,0,0,27,111,1,0,0,0,29,117,1,0,0,0,31,125,1,0,0,0,33,133,1,0,
0,0,35,140,1,0,0,0,37,150,1,0,0,0,39,153,1,0,0,0,41,157,1,0,0,0,43,161,
1,0,0,0,45,164,1,0,0,0,47,173,1,0,0,0,49,177,1,0,0,0,51,184,1,0,0,0,53,
200,1,0,0,0,55,202,1,0,0,0,57,252,1,0,0,0,59,274,1,0,0,0,61,276,1,0,0,0,
63,283,1,0,0,0,65,286,1,0,0,0,67,290,1,0,0,0,69,307,1,0,0,0,71,313,1,0,
0,0,73,316,1,0,0,0,75,76,5,40,0,0,76,2,1,0,0,0,77,78,5,41,0,0,78,4,1,0,
0,0,79,80,5,91,0,0,80,6,1,0,0,0,81,82,5,93,0,0,82,8,1,0,0,0,83,84,5,44,
0,0,84,10,1,0,0,0,85,89,5,61,0,0,86,87,5,61,0,0,87,89,5,61,0,0,88,85,1,
0,0,0,88,86,1,0,0,0,89,12,1,0,0,0,90,91,5,33,0,0,91,92,5,61,0,0,92,14,1,
0,0,0,93,94,5,60,0,0,94,95,5,62,0,0,95,16,1,0,0,0,96,97,5,60,0,0,97,18,
1,0,0,0,98,99,5,60,0,0,99,100,5,61,0,0,100,20,1,0,0,0,101,102,5,62,0,0,
102,22,1,0,0,0,103,104,5,62,0,0,104,105,5,61,0,0,105,24,1,0,0,0,106,107,
7,0,0,0,107,108,7,1,0,0,108,109,7,2,0,0,109,110,7,3,0,0,110,26,1,0,0,0,
111,112,7,1,0,0,112,113,7,0,0,0,113,114,7,1,0,0,114,115,7,2,0,0,115,116,
7,3,0,0,116,28,1,0,0,0,117,118,7,4,0,0,118,119,7,3,0,0,119,120,7,5,0,0,
120,121,7,6,0,0,121,122,7,3,0,0,122,123,7,3,0,0,123,124,7,7,0,0,124,30,
1,0,0,0,125,126,7,3,0,0,126,127,7,8,0,0,127,128,7,1,0,0,128,129,7,9,0,0,
129,131,7,5,0,0,130,132,7,9,0,0,131,130,1,0,0,0,131,132,1,0,0,0,132,32,
1,0,0,0,133,134,7,10,0,0,134,135,7,3,0,0,135,136,7,11,0,0,136,137,7,3,0,
0,137,138,7,8,0,0,138,139,7,12,0,0,139,34,1,0,0,0,140,141,7,13,0,0,141,
142,7,14,0,0,142,143,7,7,0,0,143,144,7,5,0,0,144,145,7,15,0,0,145,146,7,
1,0,0,146,148,7,7,0,0,147,149,7,9,0,0,148,147,1,0,0,0,148,149,1,0,0,0,149,
36,1,0,0,0,150,151,7,1,0,0,151,152,7,7,0,0,152,38,1,0,0,0,153,154,7,7,0,
0,154,155,7,14,0,0,155,156,7,5,0,0,156,40,1,0,0,0,157,158,7,15,0,0,158,
159,7,7,0,0,159,160,7,16,0,0,160,42,1,0,0,0,161,162,7,14,0,0,162,163,7,
10,0,0,163,44,1,0,0,0,164,165,7,17,0,0,165,166,7,15,0,0,166,167,7,9,0,0,
167,168,7,5,0,0,168,169,7,14,0,0,169,170,7,2,0,0,170,171,7,3,0,0,171,172,
7,7,0,0,172,46,1,0,0,0,173,174,7,17,0,0,174,175,7,15,0,0,175,176,7,9,0,
0,176,48,1,0,0,0,177,178,7,17,0,0,178,179,7,15,0,0,179,180,7,9,0,0,180,
181,7,15,0,0,181,182,7,7,0,0,182,183,7,18,0,0,183,50,1,0,0,0,184,185,7,
17,0,0,185,186,7,15,0,0,186,187,7,9,0,0,187,188,7,15,0,0,188,189,7,0,0,
0,189,190,7,0,0,0,190,52,1,0,0,0,191,192,7,5,0,0,192,193,7,10,0,0,193,194,
7,19,0,0,194,201,7,3,0,0,195,196,7,20,0,0,196,197,7,15,0,0,197,198,7,0,
0,0,198,199,7,9,0,0,199,201,7,3,0,0,200,191,1,0,0,0,200,195,1,0,0,0,201,
54,1,0,0,0,202,203,7,21,0,0,203,56,1,0,0,0,204,206,3,55,27,0,205,204,1,
0,0,0,205,206,1,0,0,0,206,208,1,0,0,0,207,209,3,71,35,0,208,207,1,0,0,0,
209,210,1,0,0,0,210,208,1,0,0,0,210,211,1,0,0,0,211,219,1,0,0,0,212,216,
5,46,0,0,213,215,3,71,35,0,214,213,1,0,0,0,215,218,1,0,0,0,216,214,1,0,
0,0,216,217,1,0,0,0,217,220,1,0,0,0,218,216,1,0,0,0,219,212,1,0,0,0,219,
220,1,0,0,0,220,230,1,0,0,0,221,223,7,3,0,0,222,224,3,55,27,0,223,222,1,
0,0,0,223,224,1,0,0,0,224,226,1,0,0,0,225,227,3,71,35,0,226,225,1,0,0,0,
227,228,1,0,0,0,228,226,1,0,0,0,228,229,1,0,0,0,229,231,1,0,0,0,230,221,
1,0,0,0,230,231,1,0,0,0,231,253,1,0,0,0,232,234,3,55,27,0,233,232,1,0,0,
0,233,234,1,0,0,0,234,235,1,0,0,0,235,237,5,46,0,0,236,238,3,71,35,0,237,
236,1,0,0,0,238,239,1,0,0,0,239,237,1,0,0,0,239,240,1,0,0,0,240,250,1,0,
0,0,241,243,7,3,0,0,242,244,3,55,27,0,243,242,1,0,0,0,243,244,1,0,0,0,244,
246,1,0,0,0,245,247,3,71,35,0,246,245,1,0,0,0,247,248,1,0,0,0,248,246,1,
0,0,0,248,249,1,0,0,0,249,251,1,0,0,0,250,241,1,0,0,0,250,251,1,0,0,0,251,
253,1,0,0,0,252,205,1,0,0,0,252,233,1,0,0,0,253,58,1,0,0,0,254,260,5,34,
0,0,255,259,8,22,0,0,256,257,5,92,0,0,257,259,9,0,0,0,258,255,1,0,0,0,258,
256,1,0,0,0,259,262,1,0,0,0,260,258,1,0,0,0,260,261,1,0,0,0,261,263,1,0,
0,0,262,260,1,0,0,0,263,275,5,34,0,0,264,270,5,39,0,0,265,269,8,23,0,0,
266,267,5,92,0,0,267,269,9,0,0,0,268,265,1,0,0,0,268,266,1,0,0,0,269,272,
1,0,0,0,270,268,1,0,0,0,270,271,1,0,0,0,271,273,1,0,0,0,272,270,1,0,0,0,
273,275,5,39,0,0,274,254,1,0,0,0,274,264,1,0,0,0,275,60,1,0,0,0,276,280,
7,24,0,0,277,279,7,25,0,0,278,277,1,0,0,0,279,282,1,0,0,0,280,278,1,0,0,
0,280,281,1,0,0,0,281,62,1,0,0,0,282,280,1,0,0,0,283,284,5,91,0,0,284,285,
5,93,0,0,285,64,1,0,0,0,286,287,5,91,0,0,287,288,5,42,0,0,288,289,5,93,
0,0,289,66,1,0,0,0,290,303,3,61,30,0,291,292,5,46,0,0,292,302,3,61,30,0,
293,302,3,63,31,0,294,302,3,65,32,0,295,297,5,46,0,0,296,298,3,71,35,0,
297,296,1,0,0,0,298,299,1,0,0,0,299,297,1,0,0,0,299,300,1,0,0,0,300,302,
1,0,0,0,301,291,1,0,0,0,301,293,1,0,0,0,301,294,1,0,0,0,301,295,1,0,0,0,
302,305,1,0,0,0,303,301,1,0,0,0,303,304,1,0,0,0,304,68,1,0,0,0,305,303,
1,0,0,0,306,308,7,26,0,0,307,306,1,0,0,0,308,309,1,0,0,0,309,307,1,0,0,
0,309,310,1,0,0,0,310,311,1,0,0,0,311,312,6,34,0,0,312,70,1,0,0,0,313,314,
7,27,0,0,314,72,1,0,0,0,315,317,8,28,0,0,316,315,1,0,0,0,317,318,1,0,0,
0,318,316,1,0,0,0,318,319,1,0,0,0,319,74,1,0,0,0,29,0,88,131,148,200,205,
210,216,219,223,228,230,233,239,243,248,250,252,258,260,268,270,274,280,
299,301,303,309,318,1,6,0,0];
private static __ATN: ATN;
public static get _ATN(): ATN {

View File

@@ -1,25 +1,25 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
import {ParseTreeListener} from "antlr4";
import { QueryContext } from "./FilterQueryParser.js";
import { ExpressionContext } from "./FilterQueryParser.js";
import { OrExpressionContext } from "./FilterQueryParser.js";
import { AndExpressionContext } from "./FilterQueryParser.js";
import { UnaryExpressionContext } from "./FilterQueryParser.js";
import { PrimaryContext } from "./FilterQueryParser.js";
import { ComparisonContext } from "./FilterQueryParser.js";
import { InClauseContext } from "./FilterQueryParser.js";
import { NotInClauseContext } from "./FilterQueryParser.js";
import { ValueListContext } from "./FilterQueryParser.js";
import { FullTextContext } from "./FilterQueryParser.js";
import { FunctionCallContext } from "./FilterQueryParser.js";
import { FunctionParamListContext } from "./FilterQueryParser.js";
import { FunctionParamContext } from "./FilterQueryParser.js";
import { ArrayContext } from "./FilterQueryParser.js";
import { ValueContext } from "./FilterQueryParser.js";
import { KeyContext } from "./FilterQueryParser.js";
import { QueryContext } from "./FilterQueryParser";
import { ExpressionContext } from "./FilterQueryParser";
import { OrExpressionContext } from "./FilterQueryParser";
import { AndExpressionContext } from "./FilterQueryParser";
import { UnaryExpressionContext } from "./FilterQueryParser";
import { PrimaryContext } from "./FilterQueryParser";
import { ComparisonContext } from "./FilterQueryParser";
import { InClauseContext } from "./FilterQueryParser";
import { NotInClauseContext } from "./FilterQueryParser";
import { ValueListContext } from "./FilterQueryParser";
import { FullTextContext } from "./FilterQueryParser";
import { FunctionCallContext } from "./FilterQueryParser";
import { FunctionParamListContext } from "./FilterQueryParser";
import { FunctionParamContext } from "./FilterQueryParser";
import { ArrayContext } from "./FilterQueryParser";
import { ValueContext } from "./FilterQueryParser";
import { KeyContext } from "./FilterQueryParser";
/**

View File

@@ -1,4 +1,4 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
// noinspection ES6UnusedImports,JSUnusedGlobalSymbols,JSUnusedLocalSymbols
import {
@@ -45,14 +45,13 @@ export default class FilterQueryParser extends Parser {
public static readonly HAS = 24;
public static readonly HASANY = 25;
public static readonly HASALL = 26;
public static readonly SEARCH = 27;
public static readonly BOOL = 28;
public static readonly NUMBER = 29;
public static readonly QUOTED_TEXT = 30;
public static readonly KEY = 31;
public static readonly WS = 32;
public static readonly FREETEXT = 33;
public static override readonly EOF = Token.EOF;
public static readonly BOOL = 27;
public static readonly NUMBER = 28;
public static readonly QUOTED_TEXT = 29;
public static readonly KEY = 30;
public static readonly WS = 31;
public static readonly FREETEXT = 32;
public static readonly EOF = Token.EOF;
public static readonly RULE_query = 0;
public static readonly RULE_expression = 1;
public static readonly RULE_orExpression = 2;
@@ -91,9 +90,8 @@ export default class FilterQueryParser extends Parser {
"AND", "OR",
"HASTOKEN",
"HAS", "HASANY",
"HASALL", "SEARCH",
"BOOL", "NUMBER",
"QUOTED_TEXT",
"HASALL", "BOOL",
"NUMBER", "QUOTED_TEXT",
"KEY", "WS",
"FREETEXT" ];
// tslint:disable:no-trailing-whitespace
@@ -224,7 +222,7 @@ export default class FilterQueryParser extends Parser {
this.state = 53;
this._errHandler.sync(this);
_la = this._input.LA(1);
while ((((_la) & ~0x1F) === 0 && ((1 << _la) & 4289724418) !== 0) || _la===33) {
while (((((_la - 1)) & ~0x1F) === 0 && ((1 << (_la - 1)) & 3218604033) !== 0)) {
{
this.state = 51;
this._errHandler.sync(this);
@@ -247,8 +245,7 @@ export default class FilterQueryParser extends Parser {
case 28:
case 29:
case 30:
case 31:
case 33:
case 32:
{
this.state = 50;
this.unaryExpression();
@@ -814,7 +811,7 @@ export default class FilterQueryParser extends Parser {
{
this.state = 190;
_la = this._input.LA(1);
if(!(_la===30 || _la===33)) {
if(!(_la===29 || _la===32)) {
this._errHandler.recoverInline(this);
}
else {
@@ -847,7 +844,7 @@ export default class FilterQueryParser extends Parser {
{
this.state = 192;
_la = this._input.LA(1);
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 260046848) !== 0))) {
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 125829120) !== 0))) {
this._errHandler.recoverInline(this);
}
else {
@@ -1002,7 +999,7 @@ export default class FilterQueryParser extends Parser {
{
this.state = 214;
_la = this._input.LA(1);
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 4026531840) !== 0))) {
if(!((((_la) & ~0x1F) === 0 && ((1 << _la) & 2013265920) !== 0))) {
this._errHandler.recoverInline(this);
}
else {
@@ -1051,7 +1048,7 @@ export default class FilterQueryParser extends Parser {
return localctx;
}
public static readonly _serializedATN: number[] = [4,1,33,219,2,0,7,0,2,
public static readonly _serializedATN: number[] = [4,1,32,219,2,0,7,0,2,
1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,6,7,6,2,7,7,7,2,8,7,8,2,9,7,9,2,
10,7,10,2,11,7,11,2,12,7,12,2,13,7,13,2,14,7,14,2,15,7,15,2,16,7,16,1,0,
1,0,1,0,1,1,1,1,1,2,1,2,1,2,5,2,43,8,2,10,2,12,2,46,9,2,1,3,1,3,1,3,1,3,
@@ -1066,7 +1063,7 @@ export default class FilterQueryParser extends Parser {
10,9,12,9,189,9,9,1,10,1,10,1,11,1,11,1,11,1,11,1,11,1,12,1,12,1,12,5,12,
201,8,12,10,12,12,12,204,9,12,1,13,1,13,1,13,3,13,209,8,13,1,14,1,14,1,
14,1,14,1,15,1,15,1,16,1,16,1,16,0,0,17,0,2,4,6,8,10,12,14,16,18,20,22,
24,26,28,30,32,0,5,1,0,7,8,1,0,13,14,2,0,30,30,33,33,1,0,23,27,1,0,28,31,
24,26,28,30,32,0,5,1,0,7,8,1,0,13,14,2,0,29,29,32,32,1,0,23,26,1,0,27,30,
235,0,34,1,0,0,0,2,37,1,0,0,0,4,39,1,0,0,0,6,47,1,0,0,0,8,57,1,0,0,0,10,
70,1,0,0,0,12,149,1,0,0,0,14,163,1,0,0,0,16,180,1,0,0,0,18,182,1,0,0,0,
20,190,1,0,0,0,22,192,1,0,0,0,24,197,1,0,0,0,26,208,1,0,0,0,28,210,1,0,
@@ -1118,7 +1115,7 @@ export default class FilterQueryParser extends Parser {
0,0,205,209,3,32,16,0,206,209,3,30,15,0,207,209,3,28,14,0,208,205,1,0,0,
0,208,206,1,0,0,0,208,207,1,0,0,0,209,27,1,0,0,0,210,211,5,3,0,0,211,212,
3,18,9,0,212,213,5,4,0,0,213,29,1,0,0,0,214,215,7,4,0,0,215,31,1,0,0,0,
216,217,5,31,0,0,217,33,1,0,0,0,11,44,51,53,57,70,149,163,180,187,202,208];
216,217,5,30,0,0,217,33,1,0,0,0,11,44,51,53,57,70,149,163,180,187,202,208];
private static __ATN: ATN;
public static get _ATN(): ATN {
@@ -1665,9 +1662,6 @@ export class FunctionCallContext extends ParserRuleContext {
public HASALL(): TerminalNode {
return this.getToken(FilterQueryParser.HASALL, 0);
}
public SEARCH(): TerminalNode {
return this.getToken(FilterQueryParser.SEARCH, 0);
}
public get ruleIndex(): number {
return FilterQueryParser.RULE_functionCall;
}
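
The renumbering above is mechanical: dropping SEARCH (token 27) shifts BOOL through FREETEXT down by one, and every changed constant in this file is just a bit-set over token IDs updated to match. As a minimal sketch of the arithmetic (in Go, since the identical constants reappear in the regenerated Go parser later in this diff), the functionCall keyword set {HASTOKEN..SEARCH} loses exactly its top bit:

package main

import "fmt"

func main() {
	// Old functionCall set: tokens 23..27 (HASTOKEN, HAS, HASANY, HASALL, SEARCH).
	old := int64(0)
	for tok := 23; tok <= 27; tok++ {
		old |= int64(1) << tok
	}
	// Removing SEARCH clears bit 27; the remaining keywords keep their IDs.
	updated := old &^ (int64(1) << 27)
	fmt.Println(old, updated) // 260046848 125829120, matching the hunk above
}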

View File

@@ -1,25 +1,25 @@
// Generated from grammar/FilterQuery.g4 by ANTLR 4.13.2
// Generated from FilterQuery.g4 by ANTLR 4.13.1
import {ParseTreeVisitor} from 'antlr4';
import { QueryContext } from "./FilterQueryParser.js";
import { ExpressionContext } from "./FilterQueryParser.js";
import { OrExpressionContext } from "./FilterQueryParser.js";
import { AndExpressionContext } from "./FilterQueryParser.js";
import { UnaryExpressionContext } from "./FilterQueryParser.js";
import { PrimaryContext } from "./FilterQueryParser.js";
import { ComparisonContext } from "./FilterQueryParser.js";
import { InClauseContext } from "./FilterQueryParser.js";
import { NotInClauseContext } from "./FilterQueryParser.js";
import { ValueListContext } from "./FilterQueryParser.js";
import { FullTextContext } from "./FilterQueryParser.js";
import { FunctionCallContext } from "./FilterQueryParser.js";
import { FunctionParamListContext } from "./FilterQueryParser.js";
import { FunctionParamContext } from "./FilterQueryParser.js";
import { ArrayContext } from "./FilterQueryParser.js";
import { ValueContext } from "./FilterQueryParser.js";
import { KeyContext } from "./FilterQueryParser.js";
import { QueryContext } from "./FilterQueryParser";
import { ExpressionContext } from "./FilterQueryParser";
import { OrExpressionContext } from "./FilterQueryParser";
import { AndExpressionContext } from "./FilterQueryParser";
import { UnaryExpressionContext } from "./FilterQueryParser";
import { PrimaryContext } from "./FilterQueryParser";
import { ComparisonContext } from "./FilterQueryParser";
import { InClauseContext } from "./FilterQueryParser";
import { NotInClauseContext } from "./FilterQueryParser";
import { ValueListContext } from "./FilterQueryParser";
import { FullTextContext } from "./FilterQueryParser";
import { FunctionCallContext } from "./FilterQueryParser";
import { FunctionParamListContext } from "./FilterQueryParser";
import { FunctionParamContext } from "./FilterQueryParser";
import { ArrayContext } from "./FilterQueryParser";
import { ValueContext } from "./FilterQueryParser";
import { KeyContext } from "./FilterQueryParser";
/**

View File

@@ -107,7 +107,7 @@ fullText
* ...
*/
functionCall
: (HASTOKEN | HAS | HASANY | HASALL | SEARCH) LPAREN functionParamList RPAREN
: (HASTOKEN | HAS | HASANY | HASALL) LPAREN functionParamList RPAREN
;
// Function parameters can be keys, single scalar values, or arrays
@@ -184,7 +184,6 @@ HASTOKEN : [Hh][Aa][Ss][Tt][Oo][Kk][Ee][Nn];
HAS : [Hh][Aa][Ss] ;
HASANY : [Hh][Aa][Ss][Aa][Nn][Yy] ;
HASALL : [Hh][Aa][Ss][Aa][Ll][Ll] ;
SEARCH : [Ss][Ee][Aa][Rr][Cc][Hh] ;
// Potential boolean constants
BOOL
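
To see the grammar change end to end, here is a hypothetical driver for the regenerated Go parser; the import path for the generated package is an assumption, while the constructor and rule names come from the generated files later in this diff. has()/hasany()/hasall()/hastoken() still parse as functionCall, while search(...) now falls through to the plain key/freetext paths:

package main

import (
	"github.com/antlr4-go/antlr/v4"

	parser "github.com/SigNoz/signoz/pkg/parser" // hypothetical path to the generated package
)

func main() {
	input := antlr.NewInputStream(`hasany(tags, ["a", "b"]) AND duration > 100`)
	lexer := parser.NewFilterQueryLexer(input)
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	p := parser.NewFilterQueryParser(stream)
	_ = p.Query() // entry rule; search(x) would no longer match functionCall
}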

View File

@@ -4,8 +4,6 @@ import (
"errors" //nolint:depguard
"fmt"
"log/slog"
"go.opentelemetry.io/otel/attribute"
)
// base is the fundamental struct that implements the error interface.
@@ -255,10 +253,3 @@ func NewTimeoutf(code Code, format string, args ...any) *base {
func Attr(err error) slog.Attr {
return slog.Any("exception", err)
}
// TypeAttr returns an OTel attribute.KeyValue with the "error.type" semconv key
// set to the error's type string.
func TypeAttr(err error) attribute.KeyValue {
t, _, _, _, _, _ := Unwrapb(err)
return attribute.String("error.type", t.String())
}
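
With TypeAttr gone, the package's error-to-telemetry surface narrows to the slog helper shown above. A minimal sketch of what remains:

package main

import (
	"log/slog"

	"github.com/SigNoz/signoz/pkg/errors"
)

func main() {
	err := errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "bad request")
	slog.Error("operation failed", errors.Attr(err)) // attaches err under the "exception" key
}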

View File

@@ -8,7 +8,6 @@ var (
FeatureHideRootUser = featuretypes.MustNewName("hide_root_user")
FeatureGetMetersFromZeus = featuretypes.MustNewName("get_meters_from_zeus")
FeaturePutMetersInZeus = featuretypes.MustNewName("put_meters_in_zeus")
FeatureUseMeterReporter = featuretypes.MustNewName("use_meter_reporter")
FeatureUseJSONBody = featuretypes.MustNewName("use_json_body")
)
@@ -54,14 +53,6 @@ func MustNewRegistry() featuretypes.Registry {
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureUseMeterReporter,
Kind: featuretypes.KindBoolean,
Stage: featuretypes.StageExperimental,
Description: "Controls whether the enterprise meter reporter runs instead of the noop reporter",
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureUseJSONBody,
Kind: featuretypes.KindBoolean,

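For contrast with the removed entry, a registration following the same shape (the name and description here are hypothetical; the field names are exactly those in the hunk above):

&featuretypes.Feature{
	Name:           featuretypes.MustNewName("my_experimental_flag"), // hypothetical
	Kind:           featuretypes.KindBoolean,
	Stage:          featuretypes.StageExperimental,
	Description:    "Example boolean feature declared the same way",
	DefaultVariant: featuretypes.MustNewName("disabled"),
	Variants:       featuretypes.NewBooleanVariants(),
},
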
View File

@@ -1,68 +0,0 @@
package metercollector
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
)
const (
ProviderStatic = "static"
ProviderTelemetry = "telemetry"
)
type Config struct {
Provider string `mapstructure:"provider"`
Telemetry TelemetryConfig `mapstructure:"telemetry"`
Static StaticConfig `mapstructure:"static"`
}
func (c Config) Validate() error {
switch c.Provider {
case ProviderStatic:
return c.Static.Validate()
case ProviderTelemetry:
return c.Telemetry.Validate()
default:
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidConfig, "meter collector: unknown provider %q", c.Provider)
}
}
type TelemetryConfig struct {
Name zeustypes.MeterName
Unit zeustypes.MeterUnit
Aggregation zeustypes.MeterAggregation
DBName string
TableName string
DefaultRetentionDays int
}
type StaticConfig struct {
Name zeustypes.MeterName
Unit zeustypes.MeterUnit
Aggregation zeustypes.MeterAggregation
Value int64
}
func (c StaticConfig) Validate() error {
if c.Name.IsZero() {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "static meter collector: name must be set")
}
return nil
}
func (c TelemetryConfig) Validate() error {
if c.Name.IsZero() {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: name must be set")
}
if c.DBName == "" || c.TableName == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: db_name and table_name are required")
}
if c.DefaultRetentionDays <= 0 {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidConfig, "telemetry meter collector: default_retention_days must be positive")
}
return nil
}
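
Before its removal, Validate dispatched on Provider so a bad collector definition failed at startup rather than at collection time. An illustrative fragment using the deleted API:

cfg := metercollector.Config{Provider: "s3"} // not one of static|telemetry
err := cfg.Validate()
// err: meter collector: unknown provider "s3"
// (TypeInvalidInput, code meter_collector_invalid_config)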

View File

@@ -1,25 +0,0 @@
package metercollector
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/types/zeustypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
ErrCodeMeterCollectorCollectFailed = errors.MustNewCode("meter_collector_collect_failed")
ErrCodeMeterCollectorInvalidCustomRetentionRule = errors.MustNewCode("meter_collector_invalid_custom_retention_rule")
ErrCodeInvalidConfig = errors.MustNewCode("meter_collector_invalid_config")
)
type MeterCollector interface {
Name() zeustypes.MeterName
Unit() zeustypes.MeterUnit
Aggregation() zeustypes.MeterAggregation
Origin(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, todayStart time.Time) (time.Time, error)
Collect(ctx context.Context, orgID valuer.UUID, license *licensetypes.License, window zeustypes.MeterWindow) ([]zeustypes.Meter, error)
}

View File

@@ -1,37 +0,0 @@
package meterreporter
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
var _ factory.Config = (*Config)(nil)
type Config struct {
// Interval is how often the reporter collects and ships meters.
Interval time.Duration `mapstructure:"interval"`
// Backfill enables sealed-day catch-up from the license creation day.
Backfill bool `mapstructure:"backfill"`
}
func newConfig() factory.Config {
return Config{
Interval: 6 * time.Hour,
Backfill: true,
}
}
func NewConfigFactory() factory.ConfigFactory {
return factory.NewConfigFactory(factory.MustNewName("meterreporter"), newConfig)
}
func (c Config) Validate() error {
if c.Interval < 5*time.Minute || c.Interval > 24*time.Hour {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput, "meterreporter::interval must be between 5m and 24h")
}
return nil
}
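
The removed check pinned the reporting cadence to a sane band around the 6h default. A quick fragment showing the contract:

good := meterreporter.Config{Interval: 6 * time.Hour, Backfill: true}
fmt.Println(good.Validate()) // <nil>: 6h sits inside the [5m, 24h] band

bad := meterreporter.Config{Interval: time.Minute}
fmt.Println(bad.Validate()) // meterreporter::interval must be between 5m and 24h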

View File

@@ -1,14 +0,0 @@
package meterreporter
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
var (
ErrCodeInvalidInput = errors.MustNewCode("meterreporter_invalid_input")
)
type Reporter interface {
factory.ServiceWithHealthy
}

View File

@@ -1,39 +0,0 @@
package noopmeterreporter
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/meterreporter"
)
type provider struct {
healthyC chan struct{}
stopC chan struct{}
}
func NewFactory() factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config] {
return factory.NewProviderFactory(factory.MustNewName("noop"), New)
}
func New(_ context.Context, _ factory.ProviderSettings, _ meterreporter.Config) (meterreporter.Reporter, error) {
return &provider{
healthyC: make(chan struct{}),
stopC: make(chan struct{}),
}, nil
}
func (p *provider) Start(_ context.Context) error {
close(p.healthyC)
<-p.stopC
return nil
}
func (p *provider) Stop(_ context.Context) error {
close(p.stopC)
return nil
}
func (p *provider) Healthy() <-chan struct{} {
return p.healthyC
}
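
Even as a noop, the provider honored the service lifecycle: Start blocks until Stop, and readiness is signalled by closing a channel. A sketch of the intended call pattern (ctx and settings are placeholders):

r, _ := noopmeterreporter.New(ctx, settings, meterreporter.Config{})
go func() { _ = r.Start(ctx) }() // closes healthyC, then parks on stopC
<-r.Healthy()                    // unblocks once Start has signalled readiness
_ = r.Stop(ctx)                  // closes stopC, letting Start return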

View File

@@ -1,52 +0,0 @@
package implretention
import (
"context"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type getter struct {
store retentiontypes.Store
}
// NewGetter creates a retention getter backed by the retention store.
func NewGetter(store retentiontypes.Store) retention.Getter {
return &getter{
store: store,
}
}
// GetRetentionPolicySegments loads successful TTL changes and converts them into retention policy segments.
func (getter *getter) GetRetentionPolicySegments(
ctx context.Context,
orgID valuer.UUID,
dbName string,
tableName string,
fallbackDefaultDays int,
startMs int64,
endMs int64,
) ([]*retentiontypes.RetentionPolicySegment, error) {
if startMs >= endMs {
return nil, nil
}
if dbName == "" {
return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dbName is empty")
}
if tableName == "" {
return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "tableName is empty")
}
if fallbackDefaultDays <= 0 {
return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "non-positive fallbackDefaultDays %d", fallbackDefaultDays)
}
rows, err := getter.store.ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx, orgID, dbName+"."+tableName, endMs)
if err != nil {
return nil, err
}
return retentiontypes.BuildRetentionPolicySegmentsFromRows(rows, fallbackDefaultDays, startMs, endMs)
}
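
The getter validated its inputs before touching the store, and the window is half-open, so callers pass [startMs, endMs). A hypothetical call (the db/table names and fallback value are placeholders):

segs, err := g.GetRetentionPolicySegments(ctx, orgID,
	"signoz_logs", "logs_v2_local", // hypothetical db and table names
	15,                             // fallback default retention days
	startMs, endMs)                 // half-open window [startMs, endMs)
if err != nil {
	return err // invalid input or a store failure
}
_ = segs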

View File

@@ -1,41 +0,0 @@
package implretention
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type store struct {
sqlstore sqlstore.SQLStore
}
// NewStore creates a SQL-backed retention store.
func NewStore(sqlstore sqlstore.SQLStore) retentiontypes.Store {
return &store{sqlstore: sqlstore}
}
// ListTTLSettingsByTableNameAndBeforeCreatedAt returns successful TTL settings before the given timestamp.
func (store *store) ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx context.Context, orgID valuer.UUID, tableName string, beforeMs int64) ([]*retentiontypes.TTLSetting, error) {
rows := []*retentiontypes.TTLSetting{}
err := store.
sqlstore.
BunDB().
NewSelect().
Model(&rows).
Where("table_name = ?", tableName).
Where("org_id = ?", orgID.StringValue()).
Where("status = ?", retentiontypes.TTLSettingStatusSuccess).
Where("created_at < ?", time.UnixMilli(beforeMs).UTC()).
OrderExpr("created_at ASC").
Scan(ctx)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "load ttl_setting rows for org %q table %q", orgID.StringValue(), tableName)
}
return rows, nil
}

View File

@@ -1,14 +0,0 @@
package retention
import (
"context"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// Getter resolves retention data and expressions for read paths.
type Getter interface {
// GetRetentionPolicySegments returns retention policy segments active over a half-open meter window.
GetRetentionPolicySegments(ctx context.Context, orgID valuer.UUID, dbName string, tableName string, fallbackDefaultDays int, startMs int64, endMs int64) ([]*retentiontypes.RetentionPolicySegment, error)
}

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

File diff suppressed because one or more lines are too long

View File

@@ -24,13 +24,12 @@ HASTOKEN=23
HAS=24
HASANY=25
HASALL=26
SEARCH=27
BOOL=28
NUMBER=29
QUOTED_TEXT=30
KEY=31
WS=32
FREETEXT=33
BOOL=27
NUMBER=28
QUOTED_TEXT=29
KEY=30
WS=31
FREETEXT=32
'('=1
')'=2
'['=3

View File

@@ -4,9 +4,10 @@ package parser
import (
"fmt"
"github.com/antlr4-go/antlr/v4"
"sync"
"unicode"
"github.com/antlr4-go/antlr/v4"
)
// Suppress unused import error
@@ -50,174 +51,170 @@ func filterquerylexerLexerInit() {
"", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS",
"REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY",
"HASALL", "SEARCH", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
"HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
}
staticData.RuleNames = []string{
"LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS",
"REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY",
"HASALL", "SEARCH", "BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT",
"EMPTY_BRACKS", "OLD_JSON_BRACKS", "KEY", "WS", "DIGIT", "FREETEXT",
"HASALL", "BOOL", "SIGN", "NUMBER", "QUOTED_TEXT", "SEGMENT", "EMPTY_BRACKS",
"OLD_JSON_BRACKS", "KEY", "WS", "DIGIT", "FREETEXT",
}
staticData.PredictionContextCache = antlr.NewPredictionContextCache()
staticData.serializedATN = []int32{
4, 0, 33, 329, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
4, 0, 32, 320, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
7, 36, 2, 37, 7, 37, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1,
4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 91, 8, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7,
1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11,
1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15,
1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 134, 8, 15, 1, 16, 1, 16, 1, 16, 1,
16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17,
1, 17, 3, 17, 151, 8, 17, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1,
19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22,
1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1,
24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25,
1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1,
27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 3, 27, 210,
8, 27, 1, 28, 1, 28, 1, 29, 3, 29, 215, 8, 29, 1, 29, 4, 29, 218, 8, 29,
11, 29, 12, 29, 219, 1, 29, 1, 29, 5, 29, 224, 8, 29, 10, 29, 12, 29, 227,
9, 29, 3, 29, 229, 8, 29, 1, 29, 1, 29, 3, 29, 233, 8, 29, 1, 29, 4, 29,
236, 8, 29, 11, 29, 12, 29, 237, 3, 29, 240, 8, 29, 1, 29, 3, 29, 243,
8, 29, 1, 29, 1, 29, 4, 29, 247, 8, 29, 11, 29, 12, 29, 248, 1, 29, 1,
29, 3, 29, 253, 8, 29, 1, 29, 4, 29, 256, 8, 29, 11, 29, 12, 29, 257, 3,
29, 260, 8, 29, 3, 29, 262, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 268,
8, 30, 10, 30, 12, 30, 271, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5,
30, 278, 8, 30, 10, 30, 12, 30, 281, 9, 30, 1, 30, 3, 30, 284, 8, 30, 1,
31, 1, 31, 5, 31, 288, 8, 31, 10, 31, 12, 31, 291, 9, 31, 1, 32, 1, 32,
1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1,
34, 1, 34, 4, 34, 307, 8, 34, 11, 34, 12, 34, 308, 5, 34, 311, 8, 34, 10,
34, 12, 34, 314, 9, 34, 1, 35, 4, 35, 317, 8, 35, 11, 35, 12, 35, 318,
1, 35, 1, 35, 1, 36, 1, 36, 1, 37, 4, 37, 326, 8, 37, 11, 37, 12, 37, 327,
0, 0, 38, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19,
7, 36, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5,
1, 5, 1, 5, 3, 5, 89, 8, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1,
8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1,
12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14,
1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1,
15, 1, 15, 3, 15, 132, 8, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 149,
8, 17, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 20, 1, 20, 1,
20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22,
1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1,
24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25,
1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 3, 26, 201,
8, 26, 1, 27, 1, 27, 1, 28, 3, 28, 206, 8, 28, 1, 28, 4, 28, 209, 8, 28,
11, 28, 12, 28, 210, 1, 28, 1, 28, 5, 28, 215, 8, 28, 10, 28, 12, 28, 218,
9, 28, 3, 28, 220, 8, 28, 1, 28, 1, 28, 3, 28, 224, 8, 28, 1, 28, 4, 28,
227, 8, 28, 11, 28, 12, 28, 228, 3, 28, 231, 8, 28, 1, 28, 3, 28, 234,
8, 28, 1, 28, 1, 28, 4, 28, 238, 8, 28, 11, 28, 12, 28, 239, 1, 28, 1,
28, 3, 28, 244, 8, 28, 1, 28, 4, 28, 247, 8, 28, 11, 28, 12, 28, 248, 3,
28, 251, 8, 28, 3, 28, 253, 8, 28, 1, 29, 1, 29, 1, 29, 1, 29, 5, 29, 259,
8, 29, 10, 29, 12, 29, 262, 9, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 5,
29, 269, 8, 29, 10, 29, 12, 29, 272, 9, 29, 1, 29, 3, 29, 275, 8, 29, 1,
30, 1, 30, 5, 30, 279, 8, 30, 10, 30, 12, 30, 282, 9, 30, 1, 31, 1, 31,
1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1,
33, 1, 33, 4, 33, 298, 8, 33, 11, 33, 12, 33, 299, 5, 33, 302, 8, 33, 10,
33, 12, 33, 305, 9, 33, 1, 34, 4, 34, 308, 8, 34, 11, 34, 12, 34, 309,
1, 34, 1, 34, 1, 35, 1, 35, 1, 36, 4, 36, 317, 8, 36, 11, 36, 12, 36, 318,
0, 0, 37, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19,
10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37,
19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55,
28, 57, 0, 59, 29, 61, 30, 63, 0, 65, 0, 67, 0, 69, 31, 71, 32, 73, 0,
75, 33, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105, 2, 0,
75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 66, 66, 98, 98, 2, 0, 84,
84, 116, 116, 2, 0, 87, 87, 119, 119, 2, 0, 78, 78, 110, 110, 2, 0, 88,
88, 120, 120, 2, 0, 83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71,
71, 103, 103, 2, 0, 80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 79, 79,
111, 111, 2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104,
104, 2, 0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102,
102, 2, 0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92,
4, 0, 35, 36, 64, 90, 95, 95, 97, 123, 7, 0, 35, 36, 45, 45, 47, 58, 64,
90, 95, 95, 97, 123, 125, 125, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57,
8, 0, 9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 353,
0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0,
0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0,
0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0,
0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1,
0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39,
1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0,
47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0,
0, 55, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 69, 1, 0, 0,
0, 0, 71, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 1, 77, 1, 0, 0, 0, 3, 79, 1, 0,
0, 0, 5, 81, 1, 0, 0, 0, 7, 83, 1, 0, 0, 0, 9, 85, 1, 0, 0, 0, 11, 90,
1, 0, 0, 0, 13, 92, 1, 0, 0, 0, 15, 95, 1, 0, 0, 0, 17, 98, 1, 0, 0, 0,
19, 100, 1, 0, 0, 0, 21, 103, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 108,
1, 0, 0, 0, 27, 113, 1, 0, 0, 0, 29, 119, 1, 0, 0, 0, 31, 127, 1, 0, 0,
0, 33, 135, 1, 0, 0, 0, 35, 142, 1, 0, 0, 0, 37, 152, 1, 0, 0, 0, 39, 155,
1, 0, 0, 0, 41, 159, 1, 0, 0, 0, 43, 163, 1, 0, 0, 0, 45, 166, 1, 0, 0,
0, 47, 175, 1, 0, 0, 0, 49, 179, 1, 0, 0, 0, 51, 186, 1, 0, 0, 0, 53, 193,
1, 0, 0, 0, 55, 209, 1, 0, 0, 0, 57, 211, 1, 0, 0, 0, 59, 261, 1, 0, 0,
0, 61, 283, 1, 0, 0, 0, 63, 285, 1, 0, 0, 0, 65, 292, 1, 0, 0, 0, 67, 295,
1, 0, 0, 0, 69, 299, 1, 0, 0, 0, 71, 316, 1, 0, 0, 0, 73, 322, 1, 0, 0,
0, 75, 325, 1, 0, 0, 0, 77, 78, 5, 40, 0, 0, 78, 2, 1, 0, 0, 0, 79, 80,
5, 41, 0, 0, 80, 4, 1, 0, 0, 0, 81, 82, 5, 91, 0, 0, 82, 6, 1, 0, 0, 0,
83, 84, 5, 93, 0, 0, 84, 8, 1, 0, 0, 0, 85, 86, 5, 44, 0, 0, 86, 10, 1,
0, 0, 0, 87, 91, 5, 61, 0, 0, 88, 89, 5, 61, 0, 0, 89, 91, 5, 61, 0, 0,
90, 87, 1, 0, 0, 0, 90, 88, 1, 0, 0, 0, 91, 12, 1, 0, 0, 0, 92, 93, 5,
33, 0, 0, 93, 94, 5, 61, 0, 0, 94, 14, 1, 0, 0, 0, 95, 96, 5, 60, 0, 0,
96, 97, 5, 62, 0, 0, 97, 16, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 18, 1,
0, 0, 0, 100, 101, 5, 60, 0, 0, 101, 102, 5, 61, 0, 0, 102, 20, 1, 0, 0,
0, 103, 104, 5, 62, 0, 0, 104, 22, 1, 0, 0, 0, 105, 106, 5, 62, 0, 0, 106,
107, 5, 61, 0, 0, 107, 24, 1, 0, 0, 0, 108, 109, 7, 0, 0, 0, 109, 110,
7, 1, 0, 0, 110, 111, 7, 2, 0, 0, 111, 112, 7, 3, 0, 0, 112, 26, 1, 0,
0, 0, 113, 114, 7, 1, 0, 0, 114, 115, 7, 0, 0, 0, 115, 116, 7, 1, 0, 0,
116, 117, 7, 2, 0, 0, 117, 118, 7, 3, 0, 0, 118, 28, 1, 0, 0, 0, 119, 120,
7, 4, 0, 0, 120, 121, 7, 3, 0, 0, 121, 122, 7, 5, 0, 0, 122, 123, 7, 6,
0, 0, 123, 124, 7, 3, 0, 0, 124, 125, 7, 3, 0, 0, 125, 126, 7, 7, 0, 0,
126, 30, 1, 0, 0, 0, 127, 128, 7, 3, 0, 0, 128, 129, 7, 8, 0, 0, 129, 130,
7, 1, 0, 0, 130, 131, 7, 9, 0, 0, 131, 133, 7, 5, 0, 0, 132, 134, 7, 9,
0, 0, 133, 132, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0, 134, 32, 1, 0, 0, 0,
135, 136, 7, 10, 0, 0, 136, 137, 7, 3, 0, 0, 137, 138, 7, 11, 0, 0, 138,
139, 7, 3, 0, 0, 139, 140, 7, 8, 0, 0, 140, 141, 7, 12, 0, 0, 141, 34,
1, 0, 0, 0, 142, 143, 7, 13, 0, 0, 143, 144, 7, 14, 0, 0, 144, 145, 7,
7, 0, 0, 145, 146, 7, 5, 0, 0, 146, 147, 7, 15, 0, 0, 147, 148, 7, 1, 0,
0, 148, 150, 7, 7, 0, 0, 149, 151, 7, 9, 0, 0, 150, 149, 1, 0, 0, 0, 150,
151, 1, 0, 0, 0, 151, 36, 1, 0, 0, 0, 152, 153, 7, 1, 0, 0, 153, 154, 7,
7, 0, 0, 154, 38, 1, 0, 0, 0, 155, 156, 7, 7, 0, 0, 156, 157, 7, 14, 0,
0, 157, 158, 7, 5, 0, 0, 158, 40, 1, 0, 0, 0, 159, 160, 7, 15, 0, 0, 160,
161, 7, 7, 0, 0, 161, 162, 7, 16, 0, 0, 162, 42, 1, 0, 0, 0, 163, 164,
7, 14, 0, 0, 164, 165, 7, 10, 0, 0, 165, 44, 1, 0, 0, 0, 166, 167, 7, 17,
0, 0, 167, 168, 7, 15, 0, 0, 168, 169, 7, 9, 0, 0, 169, 170, 7, 5, 0, 0,
170, 171, 7, 14, 0, 0, 171, 172, 7, 2, 0, 0, 172, 173, 7, 3, 0, 0, 173,
174, 7, 7, 0, 0, 174, 46, 1, 0, 0, 0, 175, 176, 7, 17, 0, 0, 176, 177,
7, 15, 0, 0, 177, 178, 7, 9, 0, 0, 178, 48, 1, 0, 0, 0, 179, 180, 7, 17,
0, 0, 180, 181, 7, 15, 0, 0, 181, 182, 7, 9, 0, 0, 182, 183, 7, 15, 0,
0, 183, 184, 7, 7, 0, 0, 184, 185, 7, 18, 0, 0, 185, 50, 1, 0, 0, 0, 186,
187, 7, 17, 0, 0, 187, 188, 7, 15, 0, 0, 188, 189, 7, 9, 0, 0, 189, 190,
7, 15, 0, 0, 190, 191, 7, 0, 0, 0, 191, 192, 7, 0, 0, 0, 192, 52, 1, 0,
0, 0, 193, 194, 7, 9, 0, 0, 194, 195, 7, 3, 0, 0, 195, 196, 7, 15, 0, 0,
196, 197, 7, 10, 0, 0, 197, 198, 7, 13, 0, 0, 198, 199, 7, 17, 0, 0, 199,
54, 1, 0, 0, 0, 200, 201, 7, 5, 0, 0, 201, 202, 7, 10, 0, 0, 202, 203,
7, 19, 0, 0, 203, 210, 7, 3, 0, 0, 204, 205, 7, 20, 0, 0, 205, 206, 7,
15, 0, 0, 206, 207, 7, 0, 0, 0, 207, 208, 7, 9, 0, 0, 208, 210, 7, 3, 0,
0, 209, 200, 1, 0, 0, 0, 209, 204, 1, 0, 0, 0, 210, 56, 1, 0, 0, 0, 211,
212, 7, 21, 0, 0, 212, 58, 1, 0, 0, 0, 213, 215, 3, 57, 28, 0, 214, 213,
1, 0, 0, 0, 214, 215, 1, 0, 0, 0, 215, 217, 1, 0, 0, 0, 216, 218, 3, 73,
36, 0, 217, 216, 1, 0, 0, 0, 218, 219, 1, 0, 0, 0, 219, 217, 1, 0, 0, 0,
219, 220, 1, 0, 0, 0, 220, 228, 1, 0, 0, 0, 221, 225, 5, 46, 0, 0, 222,
224, 3, 73, 36, 0, 223, 222, 1, 0, 0, 0, 224, 227, 1, 0, 0, 0, 225, 223,
1, 0, 0, 0, 225, 226, 1, 0, 0, 0, 226, 229, 1, 0, 0, 0, 227, 225, 1, 0,
0, 0, 228, 221, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 239, 1, 0, 0, 0,
230, 232, 7, 3, 0, 0, 231, 233, 3, 57, 28, 0, 232, 231, 1, 0, 0, 0, 232,
233, 1, 0, 0, 0, 233, 235, 1, 0, 0, 0, 234, 236, 3, 73, 36, 0, 235, 234,
1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 235, 1, 0, 0, 0, 237, 238, 1, 0,
0, 0, 238, 240, 1, 0, 0, 0, 239, 230, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0,
240, 262, 1, 0, 0, 0, 241, 243, 3, 57, 28, 0, 242, 241, 1, 0, 0, 0, 242,
243, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 246, 5, 46, 0, 0, 245, 247,
3, 73, 36, 0, 246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1,
0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 259, 1, 0, 0, 0, 250, 252, 7, 3, 0,
0, 251, 253, 3, 57, 28, 0, 252, 251, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0,
253, 255, 1, 0, 0, 0, 254, 256, 3, 73, 36, 0, 255, 254, 1, 0, 0, 0, 256,
257, 1, 0, 0, 0, 257, 255, 1, 0, 0, 0, 257, 258, 1, 0, 0, 0, 258, 260,
1, 0, 0, 0, 259, 250, 1, 0, 0, 0, 259, 260, 1, 0, 0, 0, 260, 262, 1, 0,
0, 0, 261, 214, 1, 0, 0, 0, 261, 242, 1, 0, 0, 0, 262, 60, 1, 0, 0, 0,
263, 269, 5, 34, 0, 0, 264, 268, 8, 22, 0, 0, 265, 266, 5, 92, 0, 0, 266,
268, 9, 0, 0, 0, 267, 264, 1, 0, 0, 0, 267, 265, 1, 0, 0, 0, 268, 271,
1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0,
0, 0, 271, 269, 1, 0, 0, 0, 272, 284, 5, 34, 0, 0, 273, 279, 5, 39, 0,
0, 274, 278, 8, 23, 0, 0, 275, 276, 5, 92, 0, 0, 276, 278, 9, 0, 0, 0,
277, 274, 1, 0, 0, 0, 277, 275, 1, 0, 0, 0, 278, 281, 1, 0, 0, 0, 279,
277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 282, 1, 0, 0, 0, 281, 279,
1, 0, 0, 0, 282, 284, 5, 39, 0, 0, 283, 263, 1, 0, 0, 0, 283, 273, 1, 0,
0, 0, 284, 62, 1, 0, 0, 0, 285, 289, 7, 24, 0, 0, 286, 288, 7, 25, 0, 0,
287, 286, 1, 0, 0, 0, 288, 291, 1, 0, 0, 0, 289, 287, 1, 0, 0, 0, 289,
290, 1, 0, 0, 0, 290, 64, 1, 0, 0, 0, 291, 289, 1, 0, 0, 0, 292, 293, 5,
91, 0, 0, 293, 294, 5, 93, 0, 0, 294, 66, 1, 0, 0, 0, 295, 296, 5, 91,
0, 0, 296, 297, 5, 42, 0, 0, 297, 298, 5, 93, 0, 0, 298, 68, 1, 0, 0, 0,
299, 312, 3, 63, 31, 0, 300, 301, 5, 46, 0, 0, 301, 311, 3, 63, 31, 0,
302, 311, 3, 65, 32, 0, 303, 311, 3, 67, 33, 0, 304, 306, 5, 46, 0, 0,
305, 307, 3, 73, 36, 0, 306, 305, 1, 0, 0, 0, 307, 308, 1, 0, 0, 0, 308,
306, 1, 0, 0, 0, 308, 309, 1, 0, 0, 0, 309, 311, 1, 0, 0, 0, 310, 300,
1, 0, 0, 0, 310, 302, 1, 0, 0, 0, 310, 303, 1, 0, 0, 0, 310, 304, 1, 0,
0, 0, 311, 314, 1, 0, 0, 0, 312, 310, 1, 0, 0, 0, 312, 313, 1, 0, 0, 0,
313, 70, 1, 0, 0, 0, 314, 312, 1, 0, 0, 0, 315, 317, 7, 26, 0, 0, 316,
315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 316, 1, 0, 0, 0, 318, 319,
1, 0, 0, 0, 319, 320, 1, 0, 0, 0, 320, 321, 6, 35, 0, 0, 321, 72, 1, 0,
0, 0, 322, 323, 7, 27, 0, 0, 323, 74, 1, 0, 0, 0, 324, 326, 8, 28, 0, 0,
325, 324, 1, 0, 0, 0, 326, 327, 1, 0, 0, 0, 327, 325, 1, 0, 0, 0, 327,
328, 1, 0, 0, 0, 328, 76, 1, 0, 0, 0, 29, 0, 90, 133, 150, 209, 214, 219,
225, 228, 232, 237, 239, 242, 248, 252, 257, 259, 261, 267, 269, 277, 279,
283, 289, 308, 310, 312, 318, 327, 1, 6, 0, 0,
0, 57, 28, 59, 29, 61, 0, 63, 0, 65, 0, 67, 30, 69, 31, 71, 0, 73, 32,
1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105, 2, 0, 75, 75,
107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 66, 66, 98, 98, 2, 0, 84, 84, 116,
116, 2, 0, 87, 87, 119, 119, 2, 0, 78, 78, 110, 110, 2, 0, 88, 88, 120,
120, 2, 0, 83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103,
103, 2, 0, 80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 79, 79, 111, 111,
2, 0, 65, 65, 97, 97, 2, 0, 68, 68, 100, 100, 2, 0, 72, 72, 104, 104, 2,
0, 89, 89, 121, 121, 2, 0, 85, 85, 117, 117, 2, 0, 70, 70, 102, 102, 2,
0, 43, 43, 45, 45, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92, 4, 0, 35,
36, 64, 90, 95, 95, 97, 123, 7, 0, 35, 36, 45, 45, 47, 58, 64, 90, 95,
95, 97, 123, 125, 125, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57, 8, 0,
9, 10, 13, 13, 32, 34, 39, 41, 44, 44, 60, 62, 91, 91, 93, 93, 344, 0,
1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0,
9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0,
0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0,
0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0,
0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1,
0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47,
1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0,
57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0,
0, 73, 1, 0, 0, 0, 1, 75, 1, 0, 0, 0, 3, 77, 1, 0, 0, 0, 5, 79, 1, 0, 0,
0, 7, 81, 1, 0, 0, 0, 9, 83, 1, 0, 0, 0, 11, 88, 1, 0, 0, 0, 13, 90, 1,
0, 0, 0, 15, 93, 1, 0, 0, 0, 17, 96, 1, 0, 0, 0, 19, 98, 1, 0, 0, 0, 21,
101, 1, 0, 0, 0, 23, 103, 1, 0, 0, 0, 25, 106, 1, 0, 0, 0, 27, 111, 1,
0, 0, 0, 29, 117, 1, 0, 0, 0, 31, 125, 1, 0, 0, 0, 33, 133, 1, 0, 0, 0,
35, 140, 1, 0, 0, 0, 37, 150, 1, 0, 0, 0, 39, 153, 1, 0, 0, 0, 41, 157,
1, 0, 0, 0, 43, 161, 1, 0, 0, 0, 45, 164, 1, 0, 0, 0, 47, 173, 1, 0, 0,
0, 49, 177, 1, 0, 0, 0, 51, 184, 1, 0, 0, 0, 53, 200, 1, 0, 0, 0, 55, 202,
1, 0, 0, 0, 57, 252, 1, 0, 0, 0, 59, 274, 1, 0, 0, 0, 61, 276, 1, 0, 0,
0, 63, 283, 1, 0, 0, 0, 65, 286, 1, 0, 0, 0, 67, 290, 1, 0, 0, 0, 69, 307,
1, 0, 0, 0, 71, 313, 1, 0, 0, 0, 73, 316, 1, 0, 0, 0, 75, 76, 5, 40, 0,
0, 76, 2, 1, 0, 0, 0, 77, 78, 5, 41, 0, 0, 78, 4, 1, 0, 0, 0, 79, 80, 5,
91, 0, 0, 80, 6, 1, 0, 0, 0, 81, 82, 5, 93, 0, 0, 82, 8, 1, 0, 0, 0, 83,
84, 5, 44, 0, 0, 84, 10, 1, 0, 0, 0, 85, 89, 5, 61, 0, 0, 86, 87, 5, 61,
0, 0, 87, 89, 5, 61, 0, 0, 88, 85, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 89,
12, 1, 0, 0, 0, 90, 91, 5, 33, 0, 0, 91, 92, 5, 61, 0, 0, 92, 14, 1, 0,
0, 0, 93, 94, 5, 60, 0, 0, 94, 95, 5, 62, 0, 0, 95, 16, 1, 0, 0, 0, 96,
97, 5, 60, 0, 0, 97, 18, 1, 0, 0, 0, 98, 99, 5, 60, 0, 0, 99, 100, 5, 61,
0, 0, 100, 20, 1, 0, 0, 0, 101, 102, 5, 62, 0, 0, 102, 22, 1, 0, 0, 0,
103, 104, 5, 62, 0, 0, 104, 105, 5, 61, 0, 0, 105, 24, 1, 0, 0, 0, 106,
107, 7, 0, 0, 0, 107, 108, 7, 1, 0, 0, 108, 109, 7, 2, 0, 0, 109, 110,
7, 3, 0, 0, 110, 26, 1, 0, 0, 0, 111, 112, 7, 1, 0, 0, 112, 113, 7, 0,
0, 0, 113, 114, 7, 1, 0, 0, 114, 115, 7, 2, 0, 0, 115, 116, 7, 3, 0, 0,
116, 28, 1, 0, 0, 0, 117, 118, 7, 4, 0, 0, 118, 119, 7, 3, 0, 0, 119, 120,
7, 5, 0, 0, 120, 121, 7, 6, 0, 0, 121, 122, 7, 3, 0, 0, 122, 123, 7, 3,
0, 0, 123, 124, 7, 7, 0, 0, 124, 30, 1, 0, 0, 0, 125, 126, 7, 3, 0, 0,
126, 127, 7, 8, 0, 0, 127, 128, 7, 1, 0, 0, 128, 129, 7, 9, 0, 0, 129,
131, 7, 5, 0, 0, 130, 132, 7, 9, 0, 0, 131, 130, 1, 0, 0, 0, 131, 132,
1, 0, 0, 0, 132, 32, 1, 0, 0, 0, 133, 134, 7, 10, 0, 0, 134, 135, 7, 3,
0, 0, 135, 136, 7, 11, 0, 0, 136, 137, 7, 3, 0, 0, 137, 138, 7, 8, 0, 0,
138, 139, 7, 12, 0, 0, 139, 34, 1, 0, 0, 0, 140, 141, 7, 13, 0, 0, 141,
142, 7, 14, 0, 0, 142, 143, 7, 7, 0, 0, 143, 144, 7, 5, 0, 0, 144, 145,
7, 15, 0, 0, 145, 146, 7, 1, 0, 0, 146, 148, 7, 7, 0, 0, 147, 149, 7, 9,
0, 0, 148, 147, 1, 0, 0, 0, 148, 149, 1, 0, 0, 0, 149, 36, 1, 0, 0, 0,
150, 151, 7, 1, 0, 0, 151, 152, 7, 7, 0, 0, 152, 38, 1, 0, 0, 0, 153, 154,
7, 7, 0, 0, 154, 155, 7, 14, 0, 0, 155, 156, 7, 5, 0, 0, 156, 40, 1, 0,
0, 0, 157, 158, 7, 15, 0, 0, 158, 159, 7, 7, 0, 0, 159, 160, 7, 16, 0,
0, 160, 42, 1, 0, 0, 0, 161, 162, 7, 14, 0, 0, 162, 163, 7, 10, 0, 0, 163,
44, 1, 0, 0, 0, 164, 165, 7, 17, 0, 0, 165, 166, 7, 15, 0, 0, 166, 167,
7, 9, 0, 0, 167, 168, 7, 5, 0, 0, 168, 169, 7, 14, 0, 0, 169, 170, 7, 2,
0, 0, 170, 171, 7, 3, 0, 0, 171, 172, 7, 7, 0, 0, 172, 46, 1, 0, 0, 0,
173, 174, 7, 17, 0, 0, 174, 175, 7, 15, 0, 0, 175, 176, 7, 9, 0, 0, 176,
48, 1, 0, 0, 0, 177, 178, 7, 17, 0, 0, 178, 179, 7, 15, 0, 0, 179, 180,
7, 9, 0, 0, 180, 181, 7, 15, 0, 0, 181, 182, 7, 7, 0, 0, 182, 183, 7, 18,
0, 0, 183, 50, 1, 0, 0, 0, 184, 185, 7, 17, 0, 0, 185, 186, 7, 15, 0, 0,
186, 187, 7, 9, 0, 0, 187, 188, 7, 15, 0, 0, 188, 189, 7, 0, 0, 0, 189,
190, 7, 0, 0, 0, 190, 52, 1, 0, 0, 0, 191, 192, 7, 5, 0, 0, 192, 193, 7,
10, 0, 0, 193, 194, 7, 19, 0, 0, 194, 201, 7, 3, 0, 0, 195, 196, 7, 20,
0, 0, 196, 197, 7, 15, 0, 0, 197, 198, 7, 0, 0, 0, 198, 199, 7, 9, 0, 0,
199, 201, 7, 3, 0, 0, 200, 191, 1, 0, 0, 0, 200, 195, 1, 0, 0, 0, 201,
54, 1, 0, 0, 0, 202, 203, 7, 21, 0, 0, 203, 56, 1, 0, 0, 0, 204, 206, 3,
55, 27, 0, 205, 204, 1, 0, 0, 0, 205, 206, 1, 0, 0, 0, 206, 208, 1, 0,
0, 0, 207, 209, 3, 71, 35, 0, 208, 207, 1, 0, 0, 0, 209, 210, 1, 0, 0,
0, 210, 208, 1, 0, 0, 0, 210, 211, 1, 0, 0, 0, 211, 219, 1, 0, 0, 0, 212,
216, 5, 46, 0, 0, 213, 215, 3, 71, 35, 0, 214, 213, 1, 0, 0, 0, 215, 218,
1, 0, 0, 0, 216, 214, 1, 0, 0, 0, 216, 217, 1, 0, 0, 0, 217, 220, 1, 0,
0, 0, 218, 216, 1, 0, 0, 0, 219, 212, 1, 0, 0, 0, 219, 220, 1, 0, 0, 0,
220, 230, 1, 0, 0, 0, 221, 223, 7, 3, 0, 0, 222, 224, 3, 55, 27, 0, 223,
222, 1, 0, 0, 0, 223, 224, 1, 0, 0, 0, 224, 226, 1, 0, 0, 0, 225, 227,
3, 71, 35, 0, 226, 225, 1, 0, 0, 0, 227, 228, 1, 0, 0, 0, 228, 226, 1,
0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 231, 1, 0, 0, 0, 230, 221, 1, 0, 0,
0, 230, 231, 1, 0, 0, 0, 231, 253, 1, 0, 0, 0, 232, 234, 3, 55, 27, 0,
233, 232, 1, 0, 0, 0, 233, 234, 1, 0, 0, 0, 234, 235, 1, 0, 0, 0, 235,
237, 5, 46, 0, 0, 236, 238, 3, 71, 35, 0, 237, 236, 1, 0, 0, 0, 238, 239,
1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 250, 1, 0,
0, 0, 241, 243, 7, 3, 0, 0, 242, 244, 3, 55, 27, 0, 243, 242, 1, 0, 0,
0, 243, 244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 247, 3, 71, 35, 0,
246, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248,
249, 1, 0, 0, 0, 249, 251, 1, 0, 0, 0, 250, 241, 1, 0, 0, 0, 250, 251,
1, 0, 0, 0, 251, 253, 1, 0, 0, 0, 252, 205, 1, 0, 0, 0, 252, 233, 1, 0,
0, 0, 253, 58, 1, 0, 0, 0, 254, 260, 5, 34, 0, 0, 255, 259, 8, 22, 0, 0,
256, 257, 5, 92, 0, 0, 257, 259, 9, 0, 0, 0, 258, 255, 1, 0, 0, 0, 258,
256, 1, 0, 0, 0, 259, 262, 1, 0, 0, 0, 260, 258, 1, 0, 0, 0, 260, 261,
1, 0, 0, 0, 261, 263, 1, 0, 0, 0, 262, 260, 1, 0, 0, 0, 263, 275, 5, 34,
0, 0, 264, 270, 5, 39, 0, 0, 265, 269, 8, 23, 0, 0, 266, 267, 5, 92, 0,
0, 267, 269, 9, 0, 0, 0, 268, 265, 1, 0, 0, 0, 268, 266, 1, 0, 0, 0, 269,
272, 1, 0, 0, 0, 270, 268, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0, 271, 273,
1, 0, 0, 0, 272, 270, 1, 0, 0, 0, 273, 275, 5, 39, 0, 0, 274, 254, 1, 0,
0, 0, 274, 264, 1, 0, 0, 0, 275, 60, 1, 0, 0, 0, 276, 280, 7, 24, 0, 0,
277, 279, 7, 25, 0, 0, 278, 277, 1, 0, 0, 0, 279, 282, 1, 0, 0, 0, 280,
278, 1, 0, 0, 0, 280, 281, 1, 0, 0, 0, 281, 62, 1, 0, 0, 0, 282, 280, 1,
0, 0, 0, 283, 284, 5, 91, 0, 0, 284, 285, 5, 93, 0, 0, 285, 64, 1, 0, 0,
0, 286, 287, 5, 91, 0, 0, 287, 288, 5, 42, 0, 0, 288, 289, 5, 93, 0, 0,
289, 66, 1, 0, 0, 0, 290, 303, 3, 61, 30, 0, 291, 292, 5, 46, 0, 0, 292,
302, 3, 61, 30, 0, 293, 302, 3, 63, 31, 0, 294, 302, 3, 65, 32, 0, 295,
297, 5, 46, 0, 0, 296, 298, 3, 71, 35, 0, 297, 296, 1, 0, 0, 0, 298, 299,
1, 0, 0, 0, 299, 297, 1, 0, 0, 0, 299, 300, 1, 0, 0, 0, 300, 302, 1, 0,
0, 0, 301, 291, 1, 0, 0, 0, 301, 293, 1, 0, 0, 0, 301, 294, 1, 0, 0, 0,
301, 295, 1, 0, 0, 0, 302, 305, 1, 0, 0, 0, 303, 301, 1, 0, 0, 0, 303,
304, 1, 0, 0, 0, 304, 68, 1, 0, 0, 0, 305, 303, 1, 0, 0, 0, 306, 308, 7,
26, 0, 0, 307, 306, 1, 0, 0, 0, 308, 309, 1, 0, 0, 0, 309, 307, 1, 0, 0,
0, 309, 310, 1, 0, 0, 0, 310, 311, 1, 0, 0, 0, 311, 312, 6, 34, 0, 0, 312,
70, 1, 0, 0, 0, 313, 314, 7, 27, 0, 0, 314, 72, 1, 0, 0, 0, 315, 317, 8,
28, 0, 0, 316, 315, 1, 0, 0, 0, 317, 318, 1, 0, 0, 0, 318, 316, 1, 0, 0,
0, 318, 319, 1, 0, 0, 0, 319, 74, 1, 0, 0, 0, 29, 0, 88, 131, 148, 200,
205, 210, 216, 219, 223, 228, 230, 233, 239, 243, 248, 250, 252, 258, 260,
268, 270, 274, 280, 299, 301, 303, 309, 318, 1, 6, 0, 0,
}
deserializer := antlr.NewATNDeserializer(nil)
staticData.atn = deserializer.Deserialize(staticData.serializedATN)
@@ -284,11 +281,10 @@ const (
FilterQueryLexerHAS = 24
FilterQueryLexerHASANY = 25
FilterQueryLexerHASALL = 26
FilterQueryLexerSEARCH = 27
FilterQueryLexerBOOL = 28
FilterQueryLexerNUMBER = 29
FilterQueryLexerQUOTED_TEXT = 30
FilterQueryLexerKEY = 31
FilterQueryLexerWS = 32
FilterQueryLexerFREETEXT = 33
FilterQueryLexerBOOL = 27
FilterQueryLexerNUMBER = 28
FilterQueryLexerQUOTED_TEXT = 29
FilterQueryLexerKEY = 30
FilterQueryLexerWS = 31
FilterQueryLexerFREETEXT = 32
)
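
One observable effect of the regenerated lexer: search is no longer a keyword, so it now tokenizes through the KEY rule like any other identifier. A minimal check, assuming the generated package name parser from the hunk above:

input := antlr.NewInputStream("search")
lex := parser.NewFilterQueryLexer(input)
tok := lex.NextToken()
fmt.Println(tok.GetTokenType() == parser.FilterQueryLexerKEY) // true after this change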

View File

@@ -40,7 +40,7 @@ func filterqueryParserInit() {
"", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
"NEQ", "LT", "LE", "GT", "GE", "LIKE", "ILIKE", "BETWEEN", "EXISTS",
"REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR", "HASTOKEN", "HAS", "HASANY",
"HASALL", "SEARCH", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
"HASALL", "BOOL", "NUMBER", "QUOTED_TEXT", "KEY", "WS", "FREETEXT",
}
staticData.RuleNames = []string{
"query", "expression", "orExpression", "andExpression", "unaryExpression",
@@ -50,7 +50,7 @@ func filterqueryParserInit() {
}
staticData.PredictionContextCache = antlr.NewPredictionContextCache()
staticData.serializedATN = []int32{
4, 1, 33, 219, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
4, 1, 32, 219, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
2, 16, 7, 16, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 5, 2, 43,
@@ -71,8 +71,8 @@ func filterqueryParserInit() {
8, 12, 10, 12, 12, 12, 204, 9, 12, 1, 13, 1, 13, 1, 13, 3, 13, 209, 8,
13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 0, 0,
17, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 0, 5,
1, 0, 7, 8, 1, 0, 13, 14, 2, 0, 30, 30, 33, 33, 1, 0, 23, 27, 1, 0, 28,
31, 235, 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 39, 1, 0, 0, 0, 6, 47,
1, 0, 7, 8, 1, 0, 13, 14, 2, 0, 29, 29, 32, 32, 1, 0, 23, 26, 1, 0, 27,
30, 235, 0, 34, 1, 0, 0, 0, 2, 37, 1, 0, 0, 0, 4, 39, 1, 0, 0, 0, 6, 47,
1, 0, 0, 0, 8, 57, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 149, 1, 0, 0, 0,
14, 163, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190,
1, 0, 0, 0, 22, 192, 1, 0, 0, 0, 24, 197, 1, 0, 0, 0, 26, 208, 1, 0, 0,
@@ -142,7 +142,7 @@ func filterqueryParserInit() {
14, 0, 208, 205, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 207, 1, 0, 0, 0,
209, 27, 1, 0, 0, 0, 210, 211, 5, 3, 0, 0, 211, 212, 3, 18, 9, 0, 212,
213, 5, 4, 0, 0, 213, 29, 1, 0, 0, 0, 214, 215, 7, 4, 0, 0, 215, 31, 1,
0, 0, 0, 216, 217, 5, 31, 0, 0, 217, 33, 1, 0, 0, 0, 11, 44, 51, 53, 57,
0, 0, 0, 216, 217, 5, 30, 0, 0, 217, 33, 1, 0, 0, 0, 11, 44, 51, 53, 57,
70, 149, 163, 180, 187, 202, 208,
}
deserializer := antlr.NewATNDeserializer(nil)
@@ -208,13 +208,12 @@ const (
FilterQueryParserHAS = 24
FilterQueryParserHASANY = 25
FilterQueryParserHASALL = 26
FilterQueryParserSEARCH = 27
FilterQueryParserBOOL = 28
FilterQueryParserNUMBER = 29
FilterQueryParserQUOTED_TEXT = 30
FilterQueryParserKEY = 31
FilterQueryParserWS = 32
FilterQueryParserFREETEXT = 33
FilterQueryParserBOOL = 27
FilterQueryParserNUMBER = 28
FilterQueryParserQUOTED_TEXT = 29
FilterQueryParserKEY = 30
FilterQueryParserWS = 31
FilterQueryParserFREETEXT = 32
)
// FilterQueryParser rules.
@@ -804,7 +803,7 @@ func (p *FilterQueryParser) AndExpression() (localctx IAndExpressionContext) {
}
_la = p.GetTokenStream().LA(1)
for (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&12879659010) != 0 {
for (int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&6437208066) != 0 {
p.SetState(51)
p.GetErrorHandler().Sync(p)
if p.HasError() {
@@ -826,7 +825,7 @@ func (p *FilterQueryParser) AndExpression() (localctx IAndExpressionContext) {
p.UnaryExpression()
}
case FilterQueryParserLPAREN, FilterQueryParserNOT, FilterQueryParserHASTOKEN, FilterQueryParserHAS, FilterQueryParserHASANY, FilterQueryParserHASALL, FilterQueryParserSEARCH, FilterQueryParserBOOL, FilterQueryParserNUMBER, FilterQueryParserQUOTED_TEXT, FilterQueryParserKEY, FilterQueryParserFREETEXT:
case FilterQueryParserLPAREN, FilterQueryParserNOT, FilterQueryParserHASTOKEN, FilterQueryParserHAS, FilterQueryParserHASANY, FilterQueryParserHASALL, FilterQueryParserBOOL, FilterQueryParserNUMBER, FilterQueryParserQUOTED_TEXT, FilterQueryParserKEY, FilterQueryParserFREETEXT:
{
p.SetState(50)
p.UnaryExpression()
@@ -2749,7 +2748,6 @@ type IFunctionCallContext interface {
HAS() antlr.TerminalNode
HASANY() antlr.TerminalNode
HASALL() antlr.TerminalNode
SEARCH() antlr.TerminalNode
// IsFunctionCallContext differentiates from other interfaces.
IsFunctionCallContext()
@@ -2827,10 +2825,6 @@ func (s *FunctionCallContext) HASALL() antlr.TerminalNode {
return s.GetToken(FilterQueryParserHASALL, 0)
}
func (s *FunctionCallContext) SEARCH() antlr.TerminalNode {
return s.GetToken(FilterQueryParserSEARCH, 0)
}
func (s *FunctionCallContext) GetRuleContext() antlr.RuleContext {
return s
}
@@ -2871,7 +2865,7 @@ func (p *FilterQueryParser) FunctionCall() (localctx IFunctionCallContext) {
p.SetState(192)
_la = p.GetTokenStream().LA(1)
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&260046848) != 0) {
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&125829120) != 0) {
p.GetErrorHandler().RecoverInline(p)
} else {
p.GetErrorHandler().ReportMatch(p)
@@ -3511,7 +3505,7 @@ func (p *FilterQueryParser) Value() (localctx IValueContext) {
p.SetState(214)
_la = p.GetTokenStream().LA(1)
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&4026531840) != 0) {
if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&2013265920) != 0) {
p.GetErrorHandler().RecoverInline(p)
} else {
p.GetErrorHandler().ReportMatch(p)
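
The parser-side literals move in lockstep with the lexer. For example, the Value() token set {BOOL, NUMBER, QUOTED_TEXT, KEY} occupied bits 28-31 and now occupies 27-30, which is exactly a one-bit shift:

old := int64(1)<<28 | int64(1)<<29 | int64(1)<<30 | int64(1)<<31
fmt.Println(old, old>>1) // 4026531840 2013265920, as in the Value() hunk above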

View File

@@ -1343,7 +1343,7 @@ func getLocalTableName(tableName string) string {
}
func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalLogs.StringValue(),
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -1378,7 +1378,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -1437,7 +1437,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1463,7 +1463,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1483,7 +1483,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1498,7 +1498,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
Set("status = ?", constants.StatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1508,10 +1508,10 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
}
}(ttlPayload)
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
}
func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalTraces.StringValue(),
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -1541,7 +1541,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -1575,7 +1575,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1613,7 +1613,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1634,7 +1634,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1649,7 +1649,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
Set("status = ?", constants.StatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -1658,7 +1658,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
}
}(distributedTableName)
}
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
}
func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool, error) {
@@ -1687,7 +1687,7 @@ func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool,
return true, nil
}
func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *retentiontypes.CustomRetentionTTLParams) (*retentiontypes.CustomRetentionTTLResponse, error) {
func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *model.CustomRetentionTTLParams) (*model.CustomRetentionTTLResponse, error) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalLogs.StringValue(),
@@ -1702,7 +1702,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
if !hasCustomRetention {
r.logger.Info("Custom retention not supported, falling back to standard TTL method", "orgID", orgID)
ttlParams := &retentiontypes.TTLParams{
ttlParams := &model.TTLParams{
Type: params.Type,
DelDuration: int64(params.DefaultTTLDays * 24 * 3600),
}
@@ -1723,7 +1723,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
return nil, errorsV2.Wrapf(apiErr.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to set standard TTL")
}
return &retentiontypes.CustomRetentionTTLResponse{
return &model.CustomRetentionTTLResponse{
Message: fmt.Sprintf("Custom retention not supported, applied standard TTL of %d days. %s", params.DefaultTTLDays, ttlResult.Message),
}, nil
}
@@ -1734,7 +1734,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
uuidWithHyphen := valuer.GenerateUUID()
uuid := strings.Replace(uuidWithHyphen.String(), "-", "", -1)
if params.Type != retentiontypes.LogsTTL {
if params.Type != constants.LogsTTL {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL only supported for logs")
}
@@ -1765,7 +1765,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
if apiErr != nil {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "custom retention TTL is already running")
}
}
@@ -1851,7 +1851,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
TableName: tableName,
TTL: params.DefaultTTLDays,
Condition: string(ttlConditionsJSON),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -1867,7 +1867,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusFailed)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
}
}
@@ -1876,21 +1876,21 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *r
r.logger.Debug("Executing custom retention TTL request: ", "request", query, "step", i+1)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting custom retention ttl", errorsV2.Attr(err))
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusFailed)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
}
}
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, retentiontypes.TTLSettingStatusSuccess)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusSuccess)
}
return &retentiontypes.CustomRetentionTTLResponse{
return &model.CustomRetentionTTLResponse{
Message: "custom retention TTL has been successfully set up",
}, nil
}
// New method to build multiIf expressions with support for multiple AND conditions
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []retentiontypes.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []model.CustomRetentionRule, defaultTTLDays int, isResourceTable bool) string {
var conditions []string
for i, rule := range ttlConditions {
@@ -1962,7 +1962,7 @@ func (r *ClickHouseReader) buildMultiIfExpression(ttlConditions []retentiontypes
return result
}
func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID string) (*retentiontypes.GetCustomRetentionTTLResponse, error) {
func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID string) (*model.GetCustomRetentionTTLResponse, error) {
// Check if V2 (custom retention) is supported
hasCustomRetention, err := r.hasCustomRetentionColumn(ctx)
if err != nil {
@@ -1971,7 +1971,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
hasCustomRetention = false
}
response := &retentiontypes.GetCustomRetentionTTLResponse{}
response := &model.GetCustomRetentionTTLResponse{}
if hasCustomRetention {
// V2 - Custom retention is supported
@@ -1994,19 +1994,19 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
if err == sql.ErrNoRows {
// No V2 configuration found, return defaults
response.DefaultTTLDays = retentiontypes.DefaultLogsRetentionDays
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
response.Status = retentiontypes.TTLSettingStatusSuccess
response.DefaultTTLDays = 15
response.TTLConditions = []model.CustomRetentionRule{}
response.Status = constants.StatusSuccess
response.ColdStorageTTLDays = -1
return response, nil
}
// Parse TTL conditions from Condition
var ttlConditions []retentiontypes.CustomRetentionRule
var ttlConditions []model.CustomRetentionRule
if customTTL.Condition != "" {
if err := json.Unmarshal([]byte(customTTL.Condition), &ttlConditions); err != nil {
r.logger.Error("Error parsing TTL conditions", errorsV2.Attr(err))
ttlConditions = []retentiontypes.CustomRetentionRule{}
ttlConditions = []model.CustomRetentionRule{}
}
}
@@ -2020,8 +2020,8 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
response.Version = "v1"
// Get V1 TTL configuration
ttlParams := &retentiontypes.GetTTLParams{
Type: retentiontypes.LogsTTL,
ttlParams := &model.GetTTLParams{
Type: constants.LogsTTL,
}
ttlResult, apiErr := r.GetTTL(ctx, orgID, ttlParams)
@@ -2041,7 +2041,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
}
// For V1, we don't have TTL conditions
response.TTLConditions = []retentiontypes.CustomRetentionRule{}
response.TTLConditions = []model.CustomRetentionRule{}
}
return response, nil
@@ -2081,7 +2081,7 @@ func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, o
}
// Enhanced validation function with duplicate detection and efficient key validation
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []retentiontypes.CustomRetentionRule) error {
func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditions []model.CustomRetentionRule) error {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.CodeNamespace: "clickhouse-reader",
instrumentationtypes.CodeFunctionName: "validateTTLConditions",
@@ -2185,16 +2185,16 @@ func (r *ClickHouseReader) validateTTLConditions(ctx context.Context, ttlConditi
// SetTTL sets the TTL for traces or metrics or logs tables.
// This is an async API which creates goroutines to set TTL.
// Status of TTL update is tracked with ttl_status table in sqlite db.
func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
// Keep only latest 100 transactions/requests
r.deleteTtlTransactions(ctx, orgID, 100)
switch params.Type {
case retentiontypes.TraceTTL:
case constants.TraceTTL:
return r.setTTLTraces(ctx, orgID, params)
case retentiontypes.MetricsTTL:
case constants.MetricsTTL:
return r.setTTLMetrics(ctx, orgID, params)
case retentiontypes.LogsTTL:
case constants.LogsTTL:
return r.setTTLLogs(ctx, orgID, params)
default:
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting ttl. ttl type should be <metrics|traces|logs>, got %v", params.Type)}
@@ -2202,7 +2202,7 @@ func (r *ClickHouseReader) SetTTL(ctx context.Context, orgID string, params *ret
}
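Since SetTTL only enqueues the change (the goroutines above record progress in ttl_status), a caller observes completion through GetTTL. A hedged usage sketch against the signatures in this diff; the reader value and the five-second poll interval are assumptions:

if _, apiErr := reader.SetTTL(ctx, orgID, &model.TTLParams{
	Type:        constants.LogsTTL,
	DelDuration: 15 * 24 * 3600, // seconds until deletion
}); apiErr != nil {
	return apiErr
}
for {
	item, apiErr := reader.GetTTL(ctx, orgID, &model.GetTTLParams{Type: constants.LogsTTL})
	if apiErr != nil {
		return apiErr
	}
	if item.Status != constants.StatusPending { // settled: success or failed
		break
	}
	time.Sleep(5 * time.Second) // illustrative poll interval
}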
func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, params *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, params *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.TelemetrySignal: telemetrytypes.SignalMetrics.StringValue(),
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -2231,7 +2231,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
if apiErr != nil {
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending {
if statusItem.Status == constants.StatusPending {
return nil, &model.ApiError{Typ: model.ErrorConflict, Err: fmt.Errorf("TTL is already running")}
}
}
@@ -2247,7 +2247,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
TransactionID: uuid,
TableName: tableName,
TTL: int(params.DelDuration),
Status: retentiontypes.TTLSettingStatusPending,
Status: constants.StatusPending,
ColdStorageTTL: coldStorageDuration,
OrgID: orgID,
}
@@ -2285,7 +2285,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2306,7 +2306,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusFailed).
Set("status = ?", constants.StatusFailed).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2321,7 +2321,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
NewUpdate().
Model(new(retentiontypes.TTLSetting)).
Set("updated_at = ?", time.Now()).
Set("status = ?", retentiontypes.TTLSettingStatusSuccess).
Set("status = ?", constants.StatusSuccess).
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
@@ -2332,7 +2332,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
for _, tableName := range tableNames {
go metricTTL(tableName)
}
return &retentiontypes.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
return &model.SetTTLResponseItem{Message: "move ttl has been successfully set up"}, nil
}
func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID string, numberOfTransactionsStore int) {
@@ -2389,7 +2389,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
// getTTLQueryStatus fetches ttl_status table status from DB
func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string, tableNameArray []string) (string, *model.ApiError) {
failFlag := false
status := retentiontypes.TTLSettingStatusSuccess
status := constants.StatusSuccess
for _, tableName := range tableNameArray {
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
emptyStatusStruct := new(retentiontypes.TTLSetting)
@@ -2399,16 +2399,16 @@ func (r *ClickHouseReader) getTTLQueryStatus(ctx context.Context, orgID string,
if apiErr != nil {
return "", &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
if statusItem.Status == retentiontypes.TTLSettingStatusPending && time.Now().Unix()-statusItem.UpdatedAt.Unix() < 3600 {
status = retentiontypes.TTLSettingStatusPending
if statusItem.Status == constants.StatusPending && time.Now().Unix()-statusItem.UpdatedAt.Unix() < 3600 {
status = constants.StatusPending
return status, nil
}
if statusItem.Status == retentiontypes.TTLSettingStatusFailed {
if statusItem.Status == constants.StatusFailed {
failFlag = true
}
}
if failFlag {
status = retentiontypes.TTLSettingStatusFailed
status = constants.StatusFailed
}
return status, nil
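The precedence in this loop is easy to misread: a recent pending entry returns immediately, a failed entry only downgrades the final result, and success is the default. The same rule as a standalone sketch, ignoring the one-hour freshness check:

// aggregateTTLStatus mirrors getTTLQueryStatus: pending wins outright,
// failed beats success, success only if nothing else was seen.
func aggregateTTLStatus(statuses []string) string {
	overall := "success"
	for _, s := range statuses {
		if s == "pending" {
			return "pending"
		}
		if s == "failed" {
			overall = "failed"
		}
	}
	return overall
}

// aggregateTTLStatus([]string{"success", "failed", "success"}) == "failed"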
@@ -2461,7 +2461,7 @@ func getLocalTableNameArray(tableNames []string) []string {
}
// GetTTL returns the current TTL, the expected TTL, and the status of the last SetTTL call for metrics, traces, and logs.
func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.GetTTLParams) (*retentiontypes.GetTTLResponseItem, *model.ApiError) {
func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError) {
ctx = ctxtypes.NewContextWithCommentVals(ctx, map[string]string{
instrumentationtypes.CodeNamespace: "clickhouse-reader",
@@ -2496,8 +2496,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
return delTTL, moveTTL
}
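Only parseTTL's tail is visible in this hunk; its job is to recover the delete and move TTLs from the engine_full string fetched by the closures below. A hedged standalone sketch, assuming ClickHouse encodes TTL clauses as toIntervalSecond(N) and marks the cold-storage rule with TO VOLUME (the response fields are in hours, hence the division by 3600):

// parseTTLSketch is an illustrative reconstruction, not the actual implementation.
// It assumes the delete interval appears before the move interval in engine_full.
func parseTTLSketch(engineFull string) (delHrs, moveHrs int) {
	delRe := regexp.MustCompile(`toIntervalSecond\(([0-9]+)\)`)
	moveRe := regexp.MustCompile(`toIntervalSecond\(([0-9]+)\) TO VOLUME`)
	delHrs, moveHrs = -1, -1
	if m := delRe.FindStringSubmatch(engineFull); len(m) == 2 {
		if secs, err := strconv.Atoi(m[1]); err == nil {
			delHrs = secs / 3600
		}
	}
	if m := moveRe.FindStringSubmatch(engineFull); len(m) == 2 {
		if secs, err := strconv.Atoi(m[1]); err == nil {
			moveHrs = secs / 3600
		}
	}
	return delHrs, moveHrs
}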
getMetricsTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
var dbResp []retentiontypes.DBResponseTTL
getMetricsTTL := func() (*model.DBResponseTTL, *model.ApiError) {
var dbResp []model.DBResponseTTL
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v'", signozSampleLocalTableName)
@@ -2514,8 +2514,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
}
getTracesTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
var dbResp []retentiontypes.DBResponseTTL
getTracesTTL := func() (*model.DBResponseTTL, *model.ApiError) {
var dbResp []model.DBResponseTTL
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.traceLocalTableName, signozTraceDBName)
@@ -2532,8 +2532,8 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
}
getLogsTTL := func() (*retentiontypes.DBResponseTTL, *model.ApiError) {
var dbResp []retentiontypes.DBResponseTTL
getLogsTTL := func() (*model.DBResponseTTL, *model.ApiError) {
var dbResp []model.DBResponseTTL
query := fmt.Sprintf("SELECT engine_full FROM system.tables WHERE name='%v' AND database='%v'", r.logsLocalTableName, r.logsDB)
@@ -2551,7 +2551,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
switch ttlParams.Type {
case retentiontypes.TraceTTL:
case constants.TraceTTL:
tableNameArray := []string{
r.TraceDB + "." + r.traceTableName,
r.TraceDB + "." + r.traceResourceTableV3,
@@ -2579,9 +2579,9 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
return &retentiontypes.GetTTLResponseItem{TracesTime: delTTL, TracesMoveTime: moveTTL, ExpectedTracesTime: ttlQuery.TTL, ExpectedTracesMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
return &model.GetTTLResponseItem{TracesTime: delTTL, TracesMoveTime: moveTTL, ExpectedTracesTime: ttlQuery.TTL, ExpectedTracesMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
case retentiontypes.MetricsTTL:
case constants.MetricsTTL:
tableNameArray := []string{signozMetricDBName + "." + signozSampleTableName}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
@@ -2602,9 +2602,9 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
return &retentiontypes.GetTTLResponseItem{MetricsTime: delTTL, MetricsMoveTime: moveTTL, ExpectedMetricsTime: ttlQuery.TTL, ExpectedMetricsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
return &model.GetTTLResponseItem{MetricsTime: delTTL, MetricsMoveTime: moveTTL, ExpectedMetricsTime: ttlQuery.TTL, ExpectedMetricsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
case retentiontypes.LogsTTL:
case constants.LogsTTL:
tableNameArray := []string{r.logsDB + "." + r.logsTableName}
tableNameArray = getLocalTableNameArray(tableNameArray)
status, apiErr := r.getTTLQueryStatus(ctx, orgID, tableNameArray)
@@ -2625,7 +2625,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
}
delTTL, moveTTL := parseTTL(dbResp.EngineFull)
return &retentiontypes.GetTTLResponseItem{LogsTime: delTTL, LogsMoveTime: moveTTL, ExpectedLogsTime: ttlQuery.TTL, ExpectedLogsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
return &model.GetTTLResponseItem{LogsTime: delTTL, LogsMoveTime: moveTTL, ExpectedLogsTime: ttlQuery.TTL, ExpectedLogsMoveTime: ttlQuery.ColdStorageTTL, Status: status}, nil
default:
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. ttl type should be metrics|traces|logs, got %v",

View File

@@ -34,7 +34,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
@@ -1678,7 +1677,7 @@ func (aH *APIHandler) setCustomRetentionTTL(w http.ResponseWriter, r *http.Reque
return
}
var params retentiontypes.CustomRetentionTTLParams
var params model.CustomRetentionTTLParams
if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "Invalid data"))
return

View File

@@ -40,7 +40,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
"github.com/SigNoz/signoz/pkg/query-service/utils"
querytemplate "github.com/SigNoz/signoz/pkg/query-service/utils/queryTemplate"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
chVariables "github.com/SigNoz/signoz/pkg/variables/clickhouse"
)
@@ -420,7 +419,7 @@ func parseTime(param string, r *http.Request) (*time.Time, error) {
}
func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
func parseTTLParams(r *http.Request) (*model.TTLParams, error) {
// make sure the required query params are present
typeTTL := r.URL.Query().Get("type")
@@ -433,7 +432,7 @@ func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
}
// Validate the type parameter
if typeTTL != retentiontypes.TraceTTL && typeTTL != retentiontypes.MetricsTTL && typeTTL != retentiontypes.LogsTTL {
if typeTTL != baseconstants.TraceTTL && typeTTL != baseconstants.MetricsTTL && typeTTL != baseconstants.LogsTTL {
return nil, fmt.Errorf("type param should be metrics|traces|logs, got %v", typeTTL)
}
@@ -456,7 +455,7 @@ func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
}
}
return &retentiontypes.TTLParams{
return &model.TTLParams{
Type: typeTTL,
DelDuration: int64(durationParsed.Seconds()),
ColdStorageVolume: coldStorage,
@@ -464,7 +463,7 @@ func parseTTLParams(r *http.Request) (*retentiontypes.TTLParams, error) {
}, nil
}
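durationParsed is only visible here through its Seconds() call; assuming the handler parses the duration query parameter with time.ParseDuration, the conversion to DelDuration works out like this:

// Hypothetical check of the seconds conversion used above.
d, err := time.ParseDuration("360h") // 15 days
if err != nil {
	panic(err)
}
fmt.Println(int64(d.Seconds())) // 1296000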
func parseGetTTL(r *http.Request) (*retentiontypes.GetTTLParams, error) {
func parseGetTTL(r *http.Request) (*model.GetTTLParams, error) {
typeTTL := r.URL.Query().Get("type")
@@ -472,12 +471,12 @@ func parseGetTTL(r *http.Request) (*retentiontypes.GetTTLParams, error) {
return nil, fmt.Errorf("type param cannot be empty from the query")
} else {
// Validate the type parameter
if typeTTL != retentiontypes.TraceTTL && typeTTL != retentiontypes.MetricsTTL && typeTTL != retentiontypes.LogsTTL {
if typeTTL != baseconstants.TraceTTL && typeTTL != baseconstants.MetricsTTL && typeTTL != baseconstants.LogsTTL {
return nil, fmt.Errorf("type param should be metrics|traces|logs, got %v", typeTTL)
}
}
return &retentiontypes.GetTTLParams{Type: typeTTL}, nil
return &model.GetTTLParams{Type: typeTTL}, nil
}
func parseAggregateAttributeRequest(r *http.Request) (*v3.AggregateAttributeRequest, error) {

View File

@@ -19,6 +19,10 @@ const (
const MaxAllowedPointsInTimeSeries = 300
const TraceTTL = "traces"
const MetricsTTL = "metrics"
const LogsTTL = "logs"
const SpanSearchScopeRoot = "isroot"
const SpanSearchScopeEntryPoint = "isentrypoint"
const OrderBySpanCount = "span_count"

View File

@@ -7,7 +7,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/querycache"
"github.com/SigNoz/signoz/pkg/types/retentiontypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
@@ -24,8 +23,8 @@ type Reader interface {
GetServicesList(ctx context.Context) (*[]string, error)
GetDependencyGraph(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
GetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.GetTTLParams) (*retentiontypes.GetTTLResponseItem, *model.ApiError)
GetCustomRetentionTTL(ctx context.Context, orgID string) (*retentiontypes.GetCustomRetentionTTLResponse, error)
GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError)
GetCustomRetentionTTL(ctx context.Context, orgID string) (*model.GetCustomRetentionTTLResponse, error)
// GetDisks returns a list of disks configured in the underlying DB. It is supported by
// clickhouse only.
@@ -47,8 +46,8 @@ type Reader interface {
GetFlamegraphSpansForTrace(ctx context.Context, orgID valuer.UUID, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, error)
// Setter Interfaces
SetTTL(ctx context.Context, orgID string, ttlParams *retentiontypes.TTLParams) (*retentiontypes.SetTTLResponseItem, *model.ApiError)
SetTTLV2(ctx context.Context, orgID string, params *retentiontypes.CustomRetentionTTLParams) (*retentiontypes.CustomRetentionTTLResponse, error)
SetTTL(ctx context.Context, orgID string, ttlParams *model.TTLParams) (*model.SetTTLResponseItem, *model.ApiError)
SetTTLV2(ctx context.Context, orgID string, params *model.CustomRetentionTTLParams) (*model.CustomRetentionTTLResponse, error)
FetchTemporality(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]map[v3.Temporality]bool, error)
GetMetricAggregateAttributes(ctx context.Context, orgID valuer.UUID, req *v3.AggregateAttributeRequest, skipSignozMetrics bool) (*v3.AggregateAttributeResponse, error)

View File

@@ -404,6 +404,56 @@ type TagKey struct {
Type TagDataType `json:"type"`
}
type TTLParams struct {
Type string // It can be one of {traces, metrics, logs}.
ColdStorageVolume string // Name of the cold storage volume.
ToColdStorageDuration int64 // Seconds after which data will be moved to cold storage.
DelDuration int64 // Seconds after which data will be deleted.
}
type CustomRetentionTTLParams struct {
Type string `json:"type"`
DefaultTTLDays int `json:"defaultTTLDays"`
TTLConditions []CustomRetentionRule `json:"ttlConditions"`
ColdStorageVolume string `json:"coldStorageVolume,omitempty"`
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"`
}
type CustomRetentionRule struct {
Filters []FilterCondition `json:"conditions"`
TTLDays int `json:"ttlDays"`
}
type FilterCondition struct {
Key string `json:"key"`
Values []string `json:"values"`
}
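Taken together, these three structs define the request body that setCustomRetentionTTL decodes. A hedged example of building and serializing one; the concrete keys and values are illustrative (assumes encoding/json is imported):

params := CustomRetentionTTLParams{
	Type:           "logs",
	DefaultTTLDays: 15,
	TTLConditions: []CustomRetentionRule{
		{
			Filters: []FilterCondition{{Key: "service.name", Values: []string{"checkout"}}},
			TTLDays: 30,
		},
	},
}
b, _ := json.Marshal(params)
fmt.Println(string(b))
// {"type":"logs","defaultTTLDays":15,"ttlConditions":[{"conditions":[{"key":"service.name","values":["checkout"]}],"ttlDays":30}]}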
type GetCustomRetentionTTLResponse struct {
Version string `json:"version"`
Status string `json:"status"`
// V1 fields
// LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
// LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
// V2 fields
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
TTLConditions []CustomRetentionRule `json:"ttl_conditions,omitempty"`
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
}
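Combined with the defaults branch in GetCustomRetentionTTL above (DefaultTTLDays 15, empty conditions, ColdStorageTTLDays -1), a v2 response with no stored configuration would serialize roughly as below, assuming StatusSuccess is the string "success" and the V2 branch sets Version to "v2"; the empty TTLConditions slice is dropped by omitempty:

{"version":"v2","status":"success","default_ttl_days":15,"cold_storage_ttl_days":-1}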
type CustomRetentionTTLResponse struct {
Message string `json:"message"`
}
type GetTTLParams struct {
Type string
}
type ListErrorsParams struct {
StartStr string `json:"start"`
EndStr string `json:"end"`

View File

@@ -150,6 +150,16 @@ type RuleResponseItem struct {
Data string `json:"data" db:"data"`
}
type TTLStatusItem struct {
Id int `json:"id" db:"id"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
TableName string `json:"table_name" db:"table_name"`
TTL int `json:"ttl" db:"ttl"`
Status string `json:"status" db:"status"`
ColdStorageTtl int `json:"cold_storage_ttl" db:"cold_storage_ttl"`
}
type ChannelItem struct {
Id int `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
@@ -452,11 +462,35 @@ type SpanAggregatesDBResponseItem struct {
GroupBy string `ch:"groupBy"`
}
type SetTTLResponseItem struct {
Message string `json:"message"`
}
type DiskItem struct {
Name string `json:"name,omitempty" ch:"name"`
Type string `json:"type,omitempty" ch:"type"`
}
type DBResponseTTL struct {
EngineFull string `ch:"engine_full"`
}
type GetTTLResponseItem struct {
MetricsTime int `json:"metrics_ttl_duration_hrs,omitempty"`
MetricsMoveTime int `json:"metrics_move_ttl_duration_hrs,omitempty"`
TracesTime int `json:"traces_ttl_duration_hrs,omitempty"`
TracesMoveTime int `json:"traces_move_ttl_duration_hrs,omitempty"`
LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
ExpectedMetricsTime int `json:"expected_metrics_ttl_duration_hrs,omitempty"`
ExpectedMetricsMoveTime int `json:"expected_metrics_move_ttl_duration_hrs,omitempty"`
ExpectedTracesTime int `json:"expected_traces_ttl_duration_hrs,omitempty"`
ExpectedTracesMoveTime int `json:"expected_traces_move_ttl_duration_hrs,omitempty"`
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
Status string `json:"status"`
}
type DBResponseServiceName struct {
ServiceName string `ch:"serviceName"`
Count uint64 `ch:"count"`

View File

@@ -8,17 +8,6 @@ const (
// BodyFullTextSearchDefaultWarning is emitted when a full-text or "body" search is run
// with the New JSON Body enhancements enabled.
BodyFullTextSearchDefaultWarning = "Full text searches default to `body.message:string`. Use `body.<key>` to search a different field inside body"
// FTSInternalKey is the sentinel Name on TelemetryFieldKey instances that represent
// wildcard map searches (all attribute/resource keys+values). The unconventional value
// prevents collision with any real field name a user could type.
FTSInternalKey = "_X_INTERNAL_FTS_KEY"
// SearchFunctionName is the grammar function name for full-text search.
SearchFunctionName = "search"
// FTSMaxWindowNs is the maximum allowed time range for a search() query (6 hours).
FTSMaxWindowNs = uint64(6 * 60 * 60 * 1_000_000_000)
)
var (

View File

@@ -46,7 +46,6 @@ type filterExpressionVisitor struct {
keysWithWarnings map[string]bool
startNs uint64
endNs uint64
ftsFieldKeys []*telemetrytypes.TelemetryFieldKey
}
type FilterExprVisitorOpts struct {
@@ -66,9 +65,6 @@ type FilterExprVisitorOpts struct {
Variables map[string]qbtypes.VariableItem
StartNs uint64
EndNs uint64
// FTSFieldKeys enables search() for this query context. nil disables search()
// (traces, metrics, and non-log callers leave this nil).
FTSFieldKeys []*telemetrytypes.TelemetryFieldKey
}
// newFilterExpressionVisitor creates a new filterExpressionVisitor.
@@ -91,7 +87,6 @@ func newFilterExpressionVisitor(opts FilterExprVisitorOpts) *filterExpressionVis
keysWithWarnings: make(map[string]bool),
startNs: opts.StartNs,
endNs: opts.EndNs,
ftsFieldKeys: opts.FTSFieldKeys,
}
}
@@ -339,9 +334,14 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
return SkipConditionLiteral
}
if v.fullTextColumn == nil {
v.errors = append(v.errors, "full text search is not supported")
return ErrorConditionLiteral
}
child := ctx.GetChild(0)
var searchText string
if keyCtx, ok := child.(*grammar.KeyContext); ok {
// create a full text search condition on the body field
searchText = keyCtx.GetText()
} else if valCtx, ok := child.(*grammar.ValueContext); ok {
if valCtx.QUOTED_TEXT() != nil {
@@ -357,15 +357,6 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
return ErrorConditionLiteral
}
}
if len(v.ftsFieldKeys) > 0 {
return v.runSearchFunction(searchText)
}
if v.fullTextColumn == nil {
v.errors = append(v.errors, "full text search is not supported")
return ErrorConditionLiteral
}
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.startNs, v.endNs, v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(searchText), v.builder)
if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
@@ -374,6 +365,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
if v.bodyJSONEnabled && v.fullTextColumn.Name == "body" {
v.warnings = append(v.warnings, BodyFullTextSearchDefaultWarning)
}
return cond
}
@@ -719,10 +711,6 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
text = ctx.FREETEXT().GetText()
}
if len(v.ftsFieldKeys) > 0 {
return v.runSearchFunction(text)
}
if v.fullTextColumn == nil {
v.errors = append(v.errors, "full text search is not supported")
return ErrorConditionLiteral
@@ -740,19 +728,12 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
return cond
}
// VisitFunctionCall handles function calls like has(), hasAny(), search(), etc.
// VisitFunctionCall handles function calls like has(), hasAny(), etc.
func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallContext) any {
if v.skipFunctionCalls {
return SkipConditionLiteral
}
// search() must be handled before visiting params: unquoted tokens like
// search(error) are parsed as a key context, and visiting them through VisitKey
// would append "key not found" errors before we can treat the text as a search string.
if ctx.SEARCH() != nil {
return v.visitSearchFunction(ctx)
}
// Get function name based on which token is present
var functionName string
if ctx.HAS() != nil {
@@ -861,63 +842,6 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
return v.builder.Or(conds...)
}
// runSearchFunction fans out a regex match for text across all ftsFieldKeys using OR.
// Used by both explicit search() calls and implicit bare-expression FTS for logs.
// Enforces the FTSMaxWindowNs guard so all callers share the same time-window limit.
func (v *filterExpressionVisitor) runSearchFunction(text string) any {
if v.endNs > 0 && v.startNs > 0 && (v.endNs-v.startNs) > FTSMaxWindowNs {
v.errors = append(v.errors, "full text search is restricted to a maximum of 6-hour time window")
return ErrorConditionLiteral
}
formattedText := FormatFullTextSearch(text)
var ftsConds []string
for _, key := range v.ftsFieldKeys {
cond, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, key, qbtypes.FilterOperatorRegexp, formattedText, v.builder)
if err != nil {
continue
}
ftsConds = append(ftsConds, cond)
}
if len(ftsConds) == 0 {
return ErrorConditionLiteral
}
return v.builder.Or(ftsConds...)
}
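Concretely, with the DefaultFTSFieldKeys list removed further down in this diff, search('error') on logs fans out to an OR across the intrinsic columns and the wildcard map conditions. The shape below follows the condition-builder tests that are also removed in this diff; each ? stands for FormatFullTextSearch('error'), and the intrinsic column names are assumed from the usual logs_v2 schema:

match(body, ?)
OR match(severity_text, ?)
OR match(trace_id, ?)
OR match(span_id, ?)
OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string)))
OR ... (attributes_number and attributes_bool via arrayMap(x -> toString(x), mapValues(...)), then resources_string)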
// visitSearchFunction handles the search() function call.
// search('value') or search(value) fans out a regex match across all FTS column keys.
func (v *filterExpressionVisitor) visitSearchFunction(ctx *grammar.FunctionCallContext) any {
// ftsFieldKeys == nil means search() is not enabled for this signal/query type.
// Only log statement builders set FTSFieldKeys; traces/metrics leave it nil.
if len(v.ftsFieldKeys) == 0 {
v.errors = append(v.errors, "search() is only supported for log queries")
return ErrorConditionLiteral
}
// Extract the search text directly from the parse tree — bypass VisitKey so that
// unquoted tokens like search(error) don't trigger "key not found" errors.
paramCtxs := ctx.FunctionParamList().AllFunctionParam()
if len(paramCtxs) < 1 {
v.errors = append(v.errors, "search() requires a value parameter, e.g. search('error') or search(error)")
return ErrorConditionLiteral
}
paramCtx := paramCtxs[0]
var searchText string
if paramCtx.Value() != nil {
raw := v.Visit(paramCtx.Value())
searchText = fmt.Sprintf("%v", raw)
} else if paramCtx.Key() != nil {
// Unquoted word — use the raw token text, bypassing the key lookup.
searchText = paramCtx.Key().GetText()
} else {
v.errors = append(v.errors, "search() parameter must be a string value")
return ErrorConditionLiteral
}
return v.runSearchFunction(searchText)
}
// VisitFunctionParamList handles the parameter list for function calls.
func (v *filterExpressionVisitor) VisitFunctionParamList(ctx *grammar.FunctionParamListContext) any {
params := ctx.AllFunctionParam()

View File

@@ -23,7 +23,6 @@ import (
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
@@ -136,9 +135,6 @@ type Config struct {
// Auditor config
Auditor auditor.Config `mapstructure:"auditor"`
// MeterReporter config
MeterReporter meterreporter.Config `mapstructure:"meterreporter"`
// CloudIntegration config
CloudIntegration cloudintegration.Config `mapstructure:"cloudintegration"`
@@ -179,7 +175,6 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
identn.NewConfigFactory(),
serviceaccount.NewConfigFactory(),
auditor.NewConfigFactory(),
meterreporter.NewConfigFactory(),
cloudintegration.NewConfigFactory(),
tracedetail.NewConfigFactory(),
authz.NewConfigFactory(),

View File

@@ -16,7 +16,6 @@ import (
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/queryparser"
@@ -53,8 +52,7 @@ func TestNewHandlers(t *testing.T) {
userRoleStore := impluser.NewUserRoleStore(sqlstore, providerSettings)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), userRoleStore, flagger)
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, retentionGetter, flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, flagger)
querierHandler := querier.NewHandler(providerSettings, nil, nil)
registryHandler := factory.NewHandler(nil)

View File

@@ -29,7 +29,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/quickfilter/implquickfilter"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport/implrawdataexport"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory/implrulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/savedview"
@@ -64,7 +63,6 @@ type Modules struct {
Preference preference.Module
UserSetter user.Setter
UserGetter user.Getter
RetentionGetter retention.Getter
SavedView savedview.Module
Apdex apdex.Module
Dashboard dashboard.Module
@@ -105,7 +103,6 @@ func NewModules(
userRoleStore authtypes.UserRoleStore,
serviceAccount serviceaccount.Module,
cloudIntegrationModule cloudintegration.Module,
retentionGetter retention.Getter,
fl flagger.Flagger,
) Modules {
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
@@ -122,7 +119,6 @@ func NewModules(
Dashboard: dashboard,
UserSetter: userSetter,
UserGetter: userGetter,
RetentionGetter: retentionGetter,
QuickFilter: quickfilter,
TraceFunnel: impltracefunnel.NewModule(impltracefunnel.NewStore(sqlstore)),
RawDataExport: implrawdataexport.NewModule(querier),

View File

@@ -16,7 +16,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
@@ -57,8 +56,7 @@ func TestNewModules(t *testing.T) {
serviceAccount := implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), nil, nil, nil, providerSettings, serviceaccount.Config{})
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), retentionGetter, flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), flagger)
reflectVal := reflect.ValueOf(modules)
for i := 0; i < reflectVal.NumField(); i++ {

View File

@@ -28,8 +28,6 @@ import (
"github.com/SigNoz/signoz/pkg/identn/apikeyidentn"
"github.com/SigNoz/signoz/pkg/identn/impersonationidentn"
"github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/meterreporter/noopmeterreporter"
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
@@ -321,12 +319,6 @@ func NewAuditorProviderFactories() factory.NamedMap[factory.ProviderFactory[audi
)
}
func NewMeterReporterProviderFactories() factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]] {
return factory.MustNewNamedMap(
noopmeterreporter.NewFactory(),
)
}
func NewFlaggerProviderFactories(registry featuretypes.Registry) factory.NamedMap[factory.ProviderFactory[flagger.FlaggerProvider, flagger.Config]] {
return factory.MustNewNamedMap(
configflagger.NewFactory(registry),

View File

@@ -22,13 +22,10 @@ import (
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/meterreporter"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/retention"
"github.com/SigNoz/signoz/pkg/modules/retention/implretention"
"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
@@ -87,7 +84,6 @@ type SigNoz struct {
Flagger flagger.Flagger
Gateway gateway.Gateway
Auditor auditor.Auditor
MeterReporter meterreporter.Reporter
}
func New(
@@ -108,7 +104,6 @@ func New(
dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing) dashboard.Module,
gatewayProviderFactory func(licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config],
auditorProviderFactories func(licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]],
meterReporterProviderFactories func(context.Context, factory.ProviderSettings, flagger.Flagger, licensing.Licensing, telemetrystore.TelemetryStore, retention.Getter, organization.Getter, zeus.Zeus) (factory.NamedMap[factory.ProviderFactory[meterreporter.Reporter, meterreporter.Config]], string),
querierHandlerCallback func(factory.ProviderSettings, querier.Querier, analytics.Analytics) querier.Handler,
cloudIntegrationCallback func(sqlstore.SQLStore, global.Global, zeus.Zeus, gateway.Gateway, licensing.Licensing, serviceaccount.Module, cloudintegration.Config) (cloudintegration.Module, error),
rulerProviderFactories func(cache.Cache, alertmanager.Alertmanager, sqlstore.SQLStore, telemetrystore.TelemetryStore, telemetrytypes.MetadataStore, prometheus.Prometheus, organization.Getter, rulestatehistory.Module, querier.Querier, queryparser.QueryParser) factory.NamedMap[factory.ProviderFactory[ruler.Ruler, ruler.Config]],
@@ -233,8 +228,6 @@ func New(
return nil, err
}
retentionGetter := implretention.NewGetter(implretention.NewStore(sqlstore))
// Initialize prometheus from the available prometheus provider factories
prometheus, err := factory.NewProviderFromNamedMap(
ctx,
@@ -393,13 +386,6 @@ func New(
return nil, err
}
// Initialize meter reporter from the variant-specific provider factories
meterReporterFactories, meterReporterProvider := meterReporterProviderFactories(ctx, providerSettings, flagger, licensing, telemetrystore, retentionGetter, orgGetter, zeus)
meterReporter, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.MeterReporter, meterReporterFactories, meterReporterProvider)
if err != nil {
return nil, err
}
// Initialize authns
store := sqlauthnstore.NewStore(sqlstore)
authNs, err := authNsCallback(ctx, providerSettings, store, licensing)
@@ -455,7 +441,7 @@ func New(
}
// Initialize all modules
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, retentionGetter, flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, flagger)
// Initialize ruler from the variant-specific provider factories
rulerInstance, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.Ruler, rulerProviderFactories(cache, alertmanager, sqlstore, telemetrystore, telemetryMetadataStore, prometheus, orgGetter, modules.RuleStateHistory, querier, queryParser), "signoz")
@@ -515,7 +501,6 @@ func New(
factory.NewNamedService(factory.MustNewName("authz"), authz),
factory.NewNamedService(factory.MustNewName("user"), userService, factory.MustNewName("authz")),
factory.NewNamedService(factory.MustNewName("auditor"), auditor),
factory.NewNamedService(factory.MustNewName("meterreporter"), meterReporter, factory.MustNewName("licensing")),
factory.NewNamedService(factory.MustNewName("ruler"), rulerInstance),
)
if err != nil {
@@ -565,6 +550,5 @@ func New(
Flagger: flagger,
Gateway: gateway,
Auditor: auditor,
MeterReporter: meterReporter,
}, nil
}

View File

@@ -25,17 +25,6 @@ func NewConditionBuilder(fm qbtypes.FieldMapper, fl flagger.Flagger) *conditionB
return &conditionBuilder{fm: fm, fl: fl}
}
// ftsMapExprs returns mapKeys and mapValues expressions for FTS matching on a map column.
// Non-string value types are wrapped with arrayMap(x -> toString(x), ...).
func ftsMapExprs(col *schema.Column) (keysExpr, valsExpr string) {
keysExpr = fmt.Sprintf("mapKeys(%s)", col.Name)
valsExpr = fmt.Sprintf("mapValues(%s)", col.Name)
if mc, ok := col.Type.(schema.MapColumnType); ok && mc.ValueType.GetType() != schema.ColumnTypeEnumString {
valsExpr = fmt.Sprintf("arrayMap(x -> toString(x), mapValues(%s))", col.Name)
}
return
}
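For the two common map column shapes this produces (matching the expectations in the condition-builder tests removed below):

// ftsMapExprs(attributes_string) -> "mapKeys(attributes_string)", "mapValues(attributes_string)"
// ftsMapExprs(attributes_number) -> "mapKeys(attributes_number)", "arrayMap(x -> toString(x), mapValues(attributes_number))"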
func (c *conditionBuilder) conditionFor(
ctx context.Context,
startNs, endNs uint64,
@@ -129,24 +118,10 @@ func (c *conditionBuilder) conditionFor(
return sb.NotILike(fieldExpression, fmt.Sprintf("%%%s%%", value)), nil
case qbtypes.FilterOperatorRegexp:
if key.Name == querybuilder.FTSInternalKey {
rawVal := fmt.Sprintf("%v", value)
keysExpr, valsExpr := ftsMapExprs(columns[0])
keysCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), keysExpr)
valsCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), valsExpr)
return sb.Or(keysCond, valsCond), nil
}
// Note: Escape $$ to $$$$ to avoid sqlbuilder interpreting materialized $ signs
// Only needed because we are using sprintf instead of sb.Match (not implemented in sqlbuilder)
return fmt.Sprintf(`match(%s, %s)`, sqlbuilder.Escape(fieldExpression), sb.Var(value)), nil
case qbtypes.FilterOperatorNotRegexp:
if key.Name == querybuilder.FTSInternalKey {
rawVal := fmt.Sprintf("%v", value)
keysExpr, valsExpr := ftsMapExprs(columns[0])
keysCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), keysExpr)
valsCond := fmt.Sprintf(`arrayExists(x -> match(x, %s), %s)`, sb.Var(rawVal), valsExpr)
return "NOT " + sb.Or(keysCond, valsCond), nil
}
// Note: Escape $$ to $$$$ to avoid sqlbuilder interpreting materialized $ signs
// Only needed because we are using sprintf instead of sb.Match (not implemented in sqlbuilder)
return fmt.Sprintf(`NOT match(%s, %s)`, sqlbuilder.Escape(fieldExpression), sb.Var(value)), nil
@@ -211,7 +186,7 @@ func (c *conditionBuilder) conditionFor(
column := columns[0]
if len(key.Evolutions) > 0 {
// we will use the corresponding column and its evolution entry for the query
newColumns, _, err := selectEvolutionsForColumns(columns, key.Evolutions, startNs, endNs)
newColumns, _, err := qbtypes.SelectEvolutionsForColumns(columns, key.Evolutions, startNs, endNs)
if err != nil {
return "", err
}
@@ -307,12 +282,6 @@ func (c *conditionBuilder) ConditionFor(
return "", err
}
// FTS wildcard conditions are self-contained (arrayExists over full map);
// no additional EXISTS wrapper is needed.
if key.Name == querybuilder.FTSInternalKey {
return condition, nil
}
// Skip adding exists filter for intrinsic fields i.e. Table level log context fields
buildExistCondition := operator.AddDefaultExistsFilter()
switch key.FieldContext {

View File

@@ -514,67 +514,6 @@ func TestConditionFor(t *testing.T) {
expectedSQL: "",
expectedError: qbtypes.ErrColumnNotFound,
},
// FTS wildcard (FTSInternalKey) cases
{
name: "FTS wildcard - attribute string REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
operator: qbtypes.FilterOperatorRegexp,
value: "error",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))",
expectedArgs: []any{"error", "error"},
},
{
name: "FTS wildcard - attribute number REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeNumber,
},
operator: qbtypes.FilterOperatorRegexp,
value: "42",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))",
expectedArgs: []any{"42", "42"},
},
{
name: "FTS wildcard - attribute bool REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeBool,
},
operator: qbtypes.FilterOperatorRegexp,
value: "true",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))",
expectedArgs: []any{"true", "true"},
},
{
name: "FTS wildcard - resource string REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
operator: qbtypes.FilterOperatorRegexp,
value: "my-service",
expectedSQL: "arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string))",
expectedArgs: []any{"my-service", "my-service"},
},
{
name: "FTS wildcard - NOT REGEXP",
key: telemetrytypes.TelemetryFieldKey{
Name: "_X_INTERNAL_FTS_KEY",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
operator: qbtypes.FilterOperatorNotRegexp,
value: "healthcheck",
expectedSQL: "NOT (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string)))",
expectedArgs: []any{"healthcheck", "healthcheck"},
},
}
fl := flaggertest.New(t)
fm := NewFieldMapper(fl)

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/SigNoz/signoz-otel-collector/constants"
"github.com/SigNoz/signoz/pkg/querybuilder"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -126,21 +125,6 @@ var (
Direction: qbtypes.OrderDirectionDesc,
},
}
// DefaultFTSFieldKeys is the ordered set of TelemetryFieldKey instances that
// search() fans out across. Intrinsic log columns use the normal conditionFor
// path; entries with Name==FTSInternalKey are short-circuited in conditionFor
// to emit arrayExists conditions over mapKeys/mapValues without arrayConcat.
DefaultFTSFieldKeys = []*telemetrytypes.TelemetryFieldKey{
{Name: LogsV2BodyColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: LogsV2SeverityTextColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: LogsV2TraceIDColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: LogsV2SpanIDColumn, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextLog, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeNumber},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeBool},
{Name: querybuilder.FTSInternalKey, Signal: telemetrytypes.SignalLogs, FieldContext: telemetrytypes.FieldContextResource, FieldDataType: telemetrytypes.FieldDataTypeString},
}
)
func bodyAliasExpression(bodyJSONEnabled bool) string {

View File

@@ -3,17 +3,12 @@ package telemetrylogs
import (
"context"
"fmt"
"slices"
"sort"
"strconv"
"strings"
"time"
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
"github.com/SigNoz/signoz-otel-collector/utils"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -80,9 +75,6 @@ func NewFieldMapper(fl flagger.Flagger) qbtypes.FieldMapper {
func (m *fieldMapper) getColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
switch key.FieldContext {
case telemetrytypes.FieldContextResource:
if key.Name == querybuilder.FTSInternalKey {
return []*schema.Column{logsV2Columns["resources_string"]}, nil
}
columns := []*schema.Column{logsV2Columns["resources_string"], logsV2Columns["resource"]}
return columns, nil
case telemetrytypes.FieldContextScope:
@@ -141,113 +133,6 @@ func (m *fieldMapper) getColumn(ctx context.Context, key *telemetrytypes.Telemet
return nil, qbtypes.ErrColumnNotFound
}
// selectEvolutionsForColumns selects the appropriate evolution entries for each column based on the time range.
// Logic:
// - Finds the latest base evolution (<= tsStartTime) across ALL columns
// - Rejects all evolutions before this latest base evolution
// - For duplicate evolutions it keeps the oldest one (the first by ReleaseTime)
// - For each column, includes its evolution if it's >= latest base evolution and <= tsEndTime
// - Results are sorted by ReleaseTime descending (newest first)
func selectEvolutionsForColumns(columns []*schema.Column, evolutions []*telemetrytypes.EvolutionEntry, tsStart, tsEnd uint64) ([]*schema.Column, []*telemetrytypes.EvolutionEntry, error) {
sortedEvolutions := make([]*telemetrytypes.EvolutionEntry, len(evolutions))
copy(sortedEvolutions, evolutions)
// sort the evolutions by ReleaseTime ascending
sort.Slice(sortedEvolutions, func(i, j int) bool {
return sortedEvolutions[i].ReleaseTime.Before(sortedEvolutions[j].ReleaseTime)
})
tsStartTime := time.Unix(0, int64(tsStart))
tsEndTime := time.Unix(0, int64(tsEnd))
// Build evolution map: column name -> evolution
evolutionMap := make(map[string]*telemetrytypes.EvolutionEntry)
for _, evolution := range sortedEvolutions {
if _, exists := evolutionMap[evolution.ColumnName+":"+evolution.FieldName+":"+strconv.Itoa(int(evolution.Version))]; exists {
// since if there is duplicate we would just use the oldest one.
continue
}
evolutionMap[evolution.ColumnName+":"+evolution.FieldName+":"+strconv.Itoa(int(evolution.Version))] = evolution
}
// Find the latest base evolution (<= tsStartTime) across ALL columns
// Evolutions are sorted, so we can break early
var latestBaseEvolutionAcrossAll *telemetrytypes.EvolutionEntry
for _, evolution := range sortedEvolutions {
if evolution.ReleaseTime.After(tsStartTime) {
break
}
latestBaseEvolutionAcrossAll = evolution
}
// We shouldn't reach this; it means the evolutions data is inconsistent
if latestBaseEvolutionAcrossAll == nil {
return nil, nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "no base evolution found for columns %v", columns)
}
columnLookUpMap := make(map[string]*schema.Column)
for _, column := range columns {
columnLookUpMap[column.Name] = column
}
// Collect column-evolution pairs
type colEvoPair struct {
column *schema.Column
evolution *telemetrytypes.EvolutionEntry
}
pairs := []colEvoPair{}
for _, evolution := range evolutionMap {
// Reject evolutions before the latest base evolution
if evolution.ReleaseTime.Before(latestBaseEvolutionAcrossAll.ReleaseTime) {
continue
}
// skip evolutions after tsEndTime
if evolution.ReleaseTime.After(tsEndTime) || evolution.ReleaseTime.Equal(tsEndTime) {
continue
}
if _, exists := columnLookUpMap[evolution.ColumnName]; !exists {
return nil, nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "evolution column %s not found in columns %v", evolution.ColumnName, columns)
}
pairs = append(pairs, colEvoPair{columnLookUpMap[evolution.ColumnName], evolution})
}
// If no pairs found, fall back to latestBaseEvolutionAcrossAll for matching columns
if len(pairs) == 0 {
for _, column := range columns {
// Use latestBaseEvolutionAcrossAll if this column name matches its column name
if column.Name == latestBaseEvolutionAcrossAll.ColumnName {
pairs = append(pairs, colEvoPair{column, latestBaseEvolutionAcrossAll})
}
}
}
// Sort by ReleaseTime descending (newest first)
slices.SortFunc(pairs, func(a, b colEvoPair) int {
// Sort by ReleaseTime descending (newest first)
if a.evolution.ReleaseTime.After(b.evolution.ReleaseTime) {
return -1
}
if a.evolution.ReleaseTime.Before(b.evolution.ReleaseTime) {
return 1
}
return 0
})
// Extract results
newColumns := make([]*schema.Column, len(pairs))
evolutionsEntries := make([]*telemetrytypes.EvolutionEntry, len(pairs))
for i, pair := range pairs {
newColumns[i] = pair.column
evolutionsEntries[i] = pair.evolution
}
return newColumns, evolutionsEntries, nil
}
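To make the selection rule concrete, here is a self-contained sketch reduced to column names and release times (the real function also dedupes by version and validates column membership); the dates mirror the second test case below, where the resource evolution lands inside the query window:

package main

import (
	"fmt"
	"sort"
	"time"
)

type evo struct {
	column  string
	release time.Time
}

func selectEvolutions(evos []evo, start, end time.Time) []string {
	// Latest base evolution: the newest release at or before the window start.
	var base *evo
	for i := range evos {
		if !evos[i].release.After(start) && (base == nil || evos[i].release.After(base.release)) {
			base = &evos[i]
		}
	}
	if base == nil {
		return nil // inconsistent evolutions data
	}
	var picked []evo
	for _, e := range evos {
		// Reject anything older than the base, and anything at or after the window end.
		if e.release.Before(base.release) || !e.release.Before(end) {
			continue
		}
		picked = append(picked, e)
	}
	if len(picked) == 0 {
		picked = []evo{*base}
	}
	// Newest first, matching the real implementation.
	sort.Slice(picked, func(i, j int) bool { return picked[i].release.After(picked[j].release) })
	names := make([]string, len(picked))
	for i, e := range picked {
		names[i] = e.column
	}
	return names
}

func main() {
	evos := []evo{
		{"resources_string", time.Unix(0, 0)},
		{"resource", time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC)},
	}
	start := time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC)
	fmt.Println(selectEvolutions(evos, start, end)) // [resource resources_string]
}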
func (m *fieldMapper) FieldFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey) (string, error) {
columns, err := m.getColumn(ctx, key)
if err != nil {
@@ -258,7 +143,7 @@ func (m *fieldMapper) FieldFor(ctx context.Context, tsStart, tsEnd uint64, key *
var evolutionsEntries []*telemetrytypes.EvolutionEntry
if len(key.Evolutions) > 0 {
// we will use the corresponding column and its evolution entry for the query
newColumns, evolutionsEntries, err = selectEvolutionsForColumns(columns, key.Evolutions, tsStart, tsEnd)
newColumns, evolutionsEntries, err = qbtypes.SelectEvolutionsForColumns(columns, key.Evolutions, tsStart, tsEnd)
if err != nil {
return "", err
}

View File

@@ -536,390 +536,6 @@ func TestFieldForWithEvolutions(t *testing.T) {
}
}
func TestSelectEvolutionsForColumns(t *testing.T) {
testCases := []struct {
name string
columns []*schema.Column
evolutions []*telemetrytypes.EvolutionEntry
tsStart uint64
tsEnd uint64
expectedColumns []string // column names
expectedEvols []string // evolution column names
expectedError bool
errorStr string
}{
{
name: "New evolutions at tsStartTime - should include latest evolution",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 30, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource"},
expectedEvols: []string{"resource"},
},
{
name: "New evolutions after tsStartTime but less than tsEndTime - should include both",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource", "resources_string"}, // sorted by ReleaseTime desc
expectedEvols: []string{"resource", "resources_string"},
},
{
name: "Columns without matching evolutions - should exclude them",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"], // no evolution for this
logsV2Columns["attributes_string"], // no evolution for this
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "New evolutions at tsEndTime - should not include new evolution",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 30, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 30, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "New evolutions after tsEndTime - should exclude new",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "Empty columns array",
columns: []*schema.Column{},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{},
expectedEvols: []string{},
expectedError: true,
errorStr: "column resources_string not found",
},
{
name: "Duplicate evolutions - should use first encountered (oldest if sorted)",
columns: []*schema.Column{
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 20, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource"},
expectedEvols: []string{"resource"}, // should use first one (older)
},
{
name: "Genuine Duplicate evolutions with new version- should consider both",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 2,
ReleaseTime: time.Date(2024, 1, 20, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 1, 16, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string", "resource"},
expectedEvols: []string{"resources_string", "resource"}, // should use first one (older)
},
{
name: "Evolution exactly at tsEndTime",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC), // exactly at tsEnd
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"}, // resource excluded because After(tsEnd) is true
expectedEvols: []string{"resources_string"},
},
{
name: "Single evolution after tsStartTime - JSON body",
columns: []*schema.Column{
logsV2Columns[LogsV2BodyV2Column],
logsV2Columns[LogsV2BodyPromotedColumn],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyV2Column,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyPromotedColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{LogsV2BodyPromotedColumn, LogsV2BodyV2Column}, // sorted by ReleaseTime desc (newest first)
expectedEvols: []string{LogsV2BodyPromotedColumn, LogsV2BodyV2Column},
},
{
name: "No evolution after tsStartTime - JSON body",
columns: []*schema.Column{
logsV2Columns[LogsV2BodyV2Column],
logsV2Columns[LogsV2BodyPromotedColumn],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyV2Column,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyPromotedColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{LogsV2BodyPromotedColumn},
expectedEvols: []string{LogsV2BodyPromotedColumn},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
resultColumns, resultEvols, err := selectEvolutionsForColumns(tc.columns, tc.evolutions, tc.tsStart, tc.tsEnd)
if tc.expectedError {
assert.Contains(t, err.Error(), tc.errorStr)
} else {
require.NoError(t, err)
assert.Equal(t, len(tc.expectedColumns), len(resultColumns), "column count mismatch")
assert.Equal(t, len(tc.expectedEvols), len(resultEvols), "evolution count mismatch")
resultColumnNames := make([]string, len(resultColumns))
for i, col := range resultColumns {
resultColumnNames[i] = col.Name
}
resultEvolNames := make([]string, len(resultEvols))
for i, evol := range resultEvols {
resultEvolNames[i] = evol.ColumnName
}
for i := range tc.expectedColumns {
assert.Equal(t, tc.expectedColumns[i], resultColumnNames[i], "expected column "+tc.expectedColumns[i])
}
for i := range tc.expectedEvols {
assert.Equal(t, tc.expectedEvols[i], resultEvolNames[i], "expected evolution "+tc.expectedEvols[i])
}
// Verify sorting: should be descending by ReleaseTime
for i := 0; i < len(resultEvols)-1; i++ {
assert.True(t, !resultEvols[i].ReleaseTime.Before(resultEvols[i+1].ReleaseTime),
"evolutions should be sorted descending by ReleaseTime")
}
}
})
}
}
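The table above pins down the window semantics fairly tightly: evolutions released at or after tsEnd are invisible, everything released inside the window is kept alongside the single latest evolution released at or before tsStart, duplicate (column, version) rows collapse to the first occurrence, the survivors sort newest-first, and an evolution naming a column that was not supplied is an error. A minimal sketch of that rule, with simplified stand-ins for schema.Column and telemetrytypes.EvolutionEntry and time.Time in place of the nanosecond uint64s the real helper takes:

package main

import (
	"fmt"
	"sort"
	"time"
)

type column struct{ Name string }

type evolution struct {
	ColumnName  string
	Version     uint32
	ReleaseTime time.Time
}

// selectEvolutions keeps every evolution released strictly before tsEnd that
// either falls inside the window or is the single latest entry at or before
// tsStart; duplicate (column, version) rows collapse to the first occurrence.
// The survivors are sorted newest-first and must all name a known column.
func selectEvolutions(cols []column, evols []evolution, tsStart, tsEnd time.Time) ([]column, []evolution, error) {
	byName := map[string]column{}
	for _, c := range cols {
		byName[c.Name] = c
	}
	seen := map[string]bool{}
	var kept []evolution
	var latestBefore *evolution
	for i := range evols {
		e := evols[i]
		key := fmt.Sprintf("%s/%d", e.ColumnName, e.Version)
		if seen[key] || !e.ReleaseTime.Before(tsEnd) {
			continue // duplicate row, or released at/after tsEnd
		}
		seen[key] = true
		if !e.ReleaseTime.After(tsStart) {
			if latestBefore == nil || e.ReleaseTime.After(latestBefore.ReleaseTime) {
				latestBefore = &evols[i]
			}
		} else {
			kept = append(kept, e)
		}
	}
	if latestBefore != nil {
		kept = append(kept, *latestBefore)
	}
	sort.Slice(kept, func(i, j int) bool { return kept[i].ReleaseTime.After(kept[j].ReleaseTime) })
	outCols := make([]column, 0, len(kept))
	for _, e := range kept {
		c, ok := byName[e.ColumnName]
		if !ok {
			return nil, nil, fmt.Errorf("column %s not found", e.ColumnName)
		}
		outCols = append(outCols, c)
	}
	return outCols, kept, nil
}

func main() {
	// Mirrors the duplicate-evolutions case: both entries predate the window,
	// so only the latest one (resource, v1) survives.
	cols := []column{{"resources_string"}, {"resource"}}
	evols := []evolution{
		{"resources_string", 0, time.Unix(0, 0)},
		{"resource", 1, time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC)},
	}
	c, _, _ := selectEvolutions(cols, evols,
		time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC))
	fmt.Println(c) // [{resource}]
}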
func TestFieldForWithMaterialized(t *testing.T) {
ctx := context.Background()

View File

@@ -1,137 +0,0 @@
package telemetrylogs
import (
"context"
"strings"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger/flaggertest"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/huandu/go-sqlbuilder"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSearchFunctionFTS(t *testing.T) {
fl := flaggertest.New(t)
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
ctx := context.Background()
fm := NewFieldMapper(fl)
cb := NewConditionBuilder(fm, fl)
keys := buildCompleteFieldKeyMap(releaseTime)
for _, field := range IntrinsicFields {
f := field
keys[field.Name] = append(keys[field.Name], &f)
}
startNs := uint64(releaseTime.Add(-1 * time.Hour).UnixNano())
endNs := uint64(releaseTime.Add(1 * time.Hour).UnixNano())
makeOpts := func(ftsKeys []*telemetrytypes.TelemetryFieldKey) querybuilder.FilterExprVisitorOpts {
return querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: instrumentationtest.New().Logger(),
FieldMapper: fm,
ConditionBuilder: cb,
FieldKeys: keys,
JsonKeyToKey: GetBodyJSONKey,
StartNs: startNs,
EndNs: endNs,
FTSFieldKeys: ftsKeys,
}
}
t.Run("search quoted string fans out to all columns", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("search('error')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
sql, _ := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
// Must touch all 8 targets: body, severity_text, trace_id, span_id, plus 4 map pairs
assert.Contains(t, sql, "match(LOWER(body), LOWER(?))")
assert.Contains(t, sql, "match(severity_text, ?)")
assert.Contains(t, sql, "match(trace_id, ?)")
assert.Contains(t, sql, "match(span_id, ?)")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(attributes_string))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapValues(attributes_string))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(attributes_number))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(attributes_bool))")
assert.Contains(t, sql, "arrayExists(x -> match(x, ?), mapKeys(resources_string))")
})
t.Run("search unquoted token produces same result as quoted", func(t *testing.T) {
quoted, err := querybuilder.PrepareWhereClause("search('error')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
unquoted, err := querybuilder.PrepareWhereClause("search(error)", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
sqlQ, argsQ := quoted.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
sqlU, argsU := unquoted.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Equal(t, sqlQ, sqlU)
assert.Equal(t, argsQ, argsU)
})
t.Run("NOT search wraps entire condition", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("NOT search('healthcheck')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
sql, _ := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Contains(t, sql, "NOT (")
})
t.Run("search combined with other filter", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("search('error') AND severity_text = 'ERROR'", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
sql, _ := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Contains(t, sql, "match(LOWER(body), LOWER(?))")
assert.Contains(t, sql, "severity_text = ?")
})
t.Run("search invalid regex is escaped as literal", func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause("search('[ERROR-1234]')", makeOpts(DefaultFTSFieldKeys))
require.NoError(t, err)
require.NotNil(t, clause)
_, args := clause.WhereClause.BuildWithFlavor(sqlbuilder.ClickHouse)
// FormatFullTextSearch escapes invalid regex — all args should be the escaped form
for _, arg := range args {
if s, ok := arg.(string); ok {
assert.Equal(t, `\[ERROR-1234\]`, s)
}
}
})
t.Run("search with FTSColumnKeys nil returns error", func(t *testing.T) {
_, err := querybuilder.PrepareWhereClause("search('error')", makeOpts(nil))
require.Error(t, err)
_, _, _, _, _, additionals := errors.Unwrapb(err)
found := false
for _, a := range additionals {
if strings.Contains(a, "search() is only supported for log queries") {
found = true
break
}
}
assert.True(t, found, "expected 'only supported for log queries' error, got: %v", additionals)
})
t.Run("search with window exceeding 6 hours returns error", func(t *testing.T) {
opts := makeOpts(DefaultFTSFieldKeys)
opts.StartNs = uint64(releaseTime.UnixNano())
opts.EndNs = uint64(releaseTime.Add(7 * time.Hour).UnixNano())
_, err := querybuilder.PrepareWhereClause("search('error')", opts)
require.Error(t, err)
_, _, _, _, _, additionals := errors.Unwrapb(err)
found := false
for _, a := range additionals {
if strings.Contains(a, "6-hour") {
found = true
break
}
}
assert.True(t, found, "expected 6-hour window error, got: %v", additionals)
})
}
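The invalid-regex case above expects `[ERROR-1234]` (an invalid character class, since R-1 is a reversed range) to be rewritten as the literal `\[ERROR-1234\]`. That behavior is consistent with a compile check followed by regexp.QuoteMeta; a sketch of that fallback, with the helper name assumed rather than taken from the real FormatFullTextSearch:

package main

import (
	"fmt"
	"regexp"
)

// escapePattern sketches the fallback asserted above: if the search term is
// not a valid regular expression, treat it as a literal by escaping every
// metacharacter. regexp.QuoteMeta("[ERROR-1234]") yields `\[ERROR-1234\]`,
// matching the test's expected argument.
func escapePattern(term string) string {
	if _, err := regexp.Compile(term); err != nil {
		return regexp.QuoteMeta(term)
	}
	return term
}

func main() {
	fmt.Println(escapePattern("[ERROR-1234]")) // \[ERROR-1234\]
	fmt.Println(escapePattern("error|fatal"))  // valid regex, kept as-is
}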

View File

@@ -100,7 +100,7 @@ func TestFilterExprLogs(t *testing.T) {
category: "Single word",
query: "<script>alert('xss')</script>",
shouldPass: false,
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '<'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '<'",
},
// Single word searches with spaces
@@ -166,7 +166,7 @@ func TestFilterExprLogs(t *testing.T) {
category: "Special characters",
query: "[tracing]",
shouldPass: false,
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '['",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '['",
},
{
category: "Special characters",
@@ -196,7 +196,7 @@ func TestFilterExprLogs(t *testing.T) {
category: "Special characters",
query: "ERROR: cannot execute update() in a read-only context",
shouldPass: false,
expectedErrorContains: "expecting one of {(, AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got ')'",
expectedErrorContains: "expecting one of {(, AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got ')'",
},
{
category: "Special characters",
@@ -618,7 +618,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
},
{
category: "Keyword conflict",
@@ -626,7 +626,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
},
{
category: "Keyword conflict",
@@ -634,7 +634,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), FREETEXT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF",
expectedErrorContains: "expecting one of {(, ), FREETEXT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF",
},
{
category: "Keyword conflict",
@@ -642,7 +642,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'like'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'like'",
},
{
category: "Keyword conflict",
@@ -650,7 +650,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
},
{
category: "Keyword conflict",
@@ -658,7 +658,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
},
{
category: "Keyword conflict",
@@ -666,7 +666,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'exists'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'exists'",
},
{
category: "Keyword conflict",
@@ -674,7 +674,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'regexp'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'regexp'",
},
{
category: "Keyword conflict",
@@ -682,7 +682,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: []any{},
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'contains'",
expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'contains'",
},
{
category: "Keyword conflict",
@@ -2018,9 +2018,9 @@ func TestFilterExprLogs(t *testing.T) {
expectedErrorContains: "",
},
{category: "Only keywords", query: "AND", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'AND'"},
{category: "Only keywords", query: "OR", shouldPass: false, expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'OR'"},
{category: "Only keywords", query: "NOT", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF"},
{category: "Only keywords", query: "AND", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'AND'"},
{category: "Only keywords", query: "OR", shouldPass: false, expectedErrorContains: "expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'OR'"},
{category: "Only keywords", query: "NOT", shouldPass: false, expectedErrorContains: "expecting one of {(, ), FREETEXT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got EOF"},
{category: "Only functions", query: "has", shouldPass: false, expectedErrorContains: "expecting one of {(, )} but got EOF"},
{category: "Only functions", query: "hasAny", shouldPass: false, expectedErrorContains: "expecting one of {(, )} but got EOF"},
@@ -2162,7 +2162,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
expectedErrorContains: "line 1:0 expecting one of {(, ), FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'and'",
},
{
category: "Operator keywords as keys",
@@ -2170,7 +2170,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'or'",
},
{
category: "Operator keywords as keys",
@@ -2178,7 +2178,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:3 expecting one of {(, ), FREETEXT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '='",
expectedErrorContains: "line 1:3 expecting one of {(, ), FREETEXT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got '='",
},
{
category: "Operator keywords as keys",
@@ -2186,7 +2186,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'between'",
},
{
category: "Operator keywords as keys",
@@ -2194,7 +2194,7 @@ func TestFilterExprLogs(t *testing.T) {
shouldPass: false,
expectedQuery: "",
expectedArgs: nil,
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, SEARCH, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
expectedErrorContains: "line 1:0 expecting one of {(, ), AND, FREETEXT, NOT, boolean, has(), hasAll(), hasAny(), hasToken(), number, quoted text} but got 'in'",
},
// Using function keywords as keys

View File

@@ -663,11 +663,11 @@ func (b *logQueryStatementBuilder) addFilterCondition(
FieldKeys: keys,
BodyJSONEnabled: bodyJSONEnabled,
SkipResourceFilter: true,
FullTextColumn: b.fullTextColumn,
JsonKeyToKey: b.jsonKeyToKey,
Variables: variables,
StartNs: start,
EndNs: end,
FTSFieldKeys: DefaultFTSFieldKeys,
})
if err != nil {

View File

@@ -2,7 +2,6 @@ package telemetrylogs
import (
"context"
"fmt"
"testing"
"time"
@@ -365,7 +364,7 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
expectedErr error
}{
{
name: "List with full text search exceeds 6h window",
name: "List with full text search",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
@@ -374,7 +373,11 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
},
Limit: 10,
},
expectedErr: fmt.Errorf("parsing the search expression"),
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"hello", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
{
name: "list query with mat col order by",
@@ -382,7 +385,7 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
Expression: "service.name = 'cartservice' hello",
},
Limit: 10,
Order: []qbtypes.OrderBy{
@@ -399,8 +402,8 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? ORDER BY `attribute_string_materialized$$key$$name` AS `materialized.key.name` desc LIMIT ?",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "hello", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
@@ -1034,7 +1037,7 @@ func TestStmtBuilderBodyField(t *testing.T) {
}
}
func TestStmtBuilderFTS(t *testing.T) {
func TestStmtBuilderBodyFullTextSearch(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -1042,9 +1045,6 @@ func TestStmtBuilderFTS(t *testing.T) {
enableUseJSONBody bool
expected qbtypes.Statement
expectedErr error
// optional per-case time window (ms); zero → use default 1747947419000/1747983448000
startMs uint64
endMs uint64
}{
{
name: "fts",
@@ -1055,11 +1055,10 @@ func TestStmtBuilderFTS(t *testing.T) {
Limit: 10,
},
enableUseJSONBody: true,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body_v2.message), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{querybuilder.BodyFullTextSearchDefaultWarning},
},
expectedErr: nil,
},
@@ -1072,16 +1071,15 @@ func TestStmtBuilderFTS(t *testing.T) {
Limit: 10,
},
enableUseJSONBody: true,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body_v2.message), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body_v2 as body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body_v2.message), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
Warnings: []string{querybuilder.BodyFullTextSearchDefaultWarning},
},
expectedErr: nil,
},
{
name: "fts_json_disabled",
name: "fts_disabled",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
@@ -1089,91 +1087,12 @@ func TestStmtBuilderFTS(t *testing.T) {
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE match(LOWER(body), LOWER(?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10},
},
expectedErr: nil,
},
// search() function: uses a 2-hour window to stay under the 6-hour limit
{
name: "search_fans_out_to_all_columns",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search('error')"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
name: "search_unquoted_token",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search(error)"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE (match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
name: "search_not_wraps_condition",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "NOT search('healthcheck')"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE NOT ((match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string))))) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "healthcheck", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
name: "search_combined_with_filter",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search('error') AND severity_text = 'ERROR'"},
Limit: 10,
},
enableUseJSONBody: false,
startMs: 1705309200000,
endMs: 1705316400000,
expected: qbtypes.Statement{
Query: "SELECT timestamp, id, trace_id, span_id, trace_flags, severity_text, severity_number, scope_name, scope_version, body, attributes_string, attributes_number, attributes_bool, resources_string, scope_string FROM signoz_logs.distributed_logs_v2 WHERE ((match(LOWER(body), LOWER(?)) OR match(severity_text, ?) OR match(trace_id, ?) OR match(span_id, ?) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_string)) OR arrayExists(x -> match(x, ?), mapValues(attributes_string))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_number)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_number)))) OR (arrayExists(x -> match(x, ?), mapKeys(attributes_bool)) OR arrayExists(x -> match(x, ?), arrayMap(x -> toString(x), mapValues(attributes_bool)))) OR (arrayExists(x -> match(x, ?), mapKeys(resources_string)) OR arrayExists(x -> match(x, ?), mapValues(resources_string)))) AND severity_text = ?) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{"error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "error", "ERROR", "1705309200000000000", uint64(1705307400), "1705316400000000000", uint64(1705316400), 10},
},
},
{
// default window is ~10h which exceeds the 6-hour search() limit
name: "search_window_exceeds_6h",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Filter: &qbtypes.Filter{Expression: "search('error')"},
Limit: 10,
},
enableUseJSONBody: false,
expectedErr: fmt.Errorf("6-hour"),
},
}
for _, c := range cases {
@@ -1199,24 +1118,19 @@ func TestStmtBuilderFTS(t *testing.T) {
fl,
)
startMs := uint64(1747947419000)
if c.startMs != 0 {
startMs = c.startMs
}
endMs := uint64(1747983448000)
if c.endMs != 0 {
endMs = c.endMs
}
q, err := statementBuilder.Build(context.Background(), startMs, endMs, c.requestType, c.query, nil)
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
require.NoError(t, err)
if c.expected.Query != "" {
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
if err != nil {
_, _, _, _, _, add := errors.Unwrapb(err)
t.Logf("error additionals: %v", add)
}
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
}
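The two raw-list cases above show the fallback this change restores: a bare token in the filter expression ("hello", alone or trailing a structured condition) becomes a parameterised case-insensitive match on the log body. A small sketch of that composition, with hypothetical helper names; the real statement builder also threads warnings and the resource-filter CTE around this:

package main

import "fmt"

// bodyFullText turns a bare token into the default body match fragment.
func bodyFullText(token string, args []any) (string, []any) {
	return "match(LOWER(body), LOWER(?))", append(args, token)
}

// combine ANDs a structured condition with the freetext fallback, mirroring
// the "service.name = 'cartservice' hello" case above.
func combine(structured string, args []any, token string) (string, []any) {
	ft, outArgs := bodyFullText(token, args)
	if structured == "" {
		return ft, outArgs
	}
	return structured + " AND " + ft, outArgs
}

func main() {
	cond, args := combine("simpleJSONExtractString(labels, 'service.name') = ?", []any{"cartservice"}, "hello")
	fmt.Println(cond) // simpleJSONExtractString(labels, 'service.name') = ? AND match(LOWER(body), LOWER(?))
	fmt.Println(args) // [cartservice hello]
}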

View File

@@ -344,6 +344,11 @@ func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelector
})
}
}
if err = t.updateColumnEvolutionMetadataForKeys(ctx, keys); err != nil {
return nil, false, err
}
return keys, complete, nil
}
@@ -689,7 +694,7 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
}
}
if _, err := t.updateColumnEvolutionMetadataForKeys(ctx, keys); err != nil {
if err := t.updateColumnEvolutionMetadataForKeys(ctx, keys); err != nil {
return nil, false, err
}
@@ -2370,8 +2375,8 @@ func (k *telemetryMetaStore) fetchEvolutionEntryFromClickHouse(ctx context.Conte
return entries, nil
}
// Get retrieves all evolutions for the given selectors from DB.
func (k *telemetryMetaStore) updateColumnEvolutionMetadataForKeys(ctx context.Context, keysToUpdate []*telemetrytypes.TelemetryFieldKey) (map[string][]*telemetrytypes.EvolutionEntry, error) {
// updateColumnEvolutionMetadataForKeys updates the evolution field for keys.
func (k *telemetryMetaStore) updateColumnEvolutionMetadataForKeys(ctx context.Context, keysToUpdate []*telemetrytypes.TelemetryFieldKey) error {
var metadataKeySelectors []*telemetrytypes.EvolutionSelector
for _, keySelector := range keysToUpdate {
@@ -2385,7 +2390,7 @@ func (k *telemetryMetaStore) updateColumnEvolutionMetadataForKeys(ctx context.Co
evolutions, err := k.fetchEvolutionEntryFromClickHouse(ctx, metadataKeySelectors)
if err != nil {
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to fetch evolution from clickhouse %s", err.Error())
return errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to fetch evolution from clickhouse %s", err.Error())
}
evolutionsByUniqueKey := make(map[string][]*telemetrytypes.EvolutionEntry)
@@ -2416,7 +2421,7 @@ func (k *telemetryMetaStore) updateColumnEvolutionMetadataForKeys(ctx context.Co
}
}
}
return evolutionsByUniqueKey, nil
return nil
}
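The signature change above swaps the returned evolutions map for an in-place update of the keys. A sketch of the attach step under simplified types, assuming the unique key is (signal, field context, field name); the real matching also has to account for the column-level __all__ entries:

package main

import "fmt"

type evolutionEntry struct {
	Signal, FieldContext, FieldName string
}

type fieldKey struct {
	Signal, FieldContext, Name string
	Evolutions                 []*evolutionEntry
}

// attachEvolutions groups the fetched entries by their unique key and hangs
// each group off the matching field key, so callers read key.Evolutions
// instead of a returned map.
func attachEvolutions(keys []*fieldKey, entries []*evolutionEntry) {
	byKey := map[string][]*evolutionEntry{}
	for _, e := range entries {
		uk := e.Signal + "|" + e.FieldContext + "|" + e.FieldName
		byKey[uk] = append(byKey[uk], e)
	}
	for _, k := range keys {
		k.Evolutions = byKey[k.Signal+"|"+k.FieldContext+"|"+k.Name]
	}
}

func main() {
	keys := []*fieldKey{{Signal: "logs", FieldContext: "resource", Name: "service.name"}}
	entries := []*evolutionEntry{{Signal: "logs", FieldContext: "resource", FieldName: "service.name"}}
	attachEvolutions(keys, entries)
	fmt.Println(len(keys[0].Evolutions)) // 1
}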
// chunkSizeFirstSeenMetricMetadata limits the number of tuples per SQL query to avoid hitting the max_query_size limit.

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger/flaggertest"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/telemetryaudit"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
@@ -17,7 +18,6 @@ import (
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
cmock "github.com/srikanthccv/ClickHouse-go-mock"
"github.com/stretchr/testify/assert"
"github.com/SigNoz/signoz/pkg/flagger/flaggertest"
"github.com/stretchr/testify/require"
)
@@ -89,6 +89,19 @@ func TestGetKeys(t *testing.T) {
{Name: "tag_data_type", Type: "String"},
{Name: "priority", Type: "UInt8"},
}, [][]any{{"http.method", "tag", "String", 1}, {"http.method", "tag", "String", 1}}))
mock.ExpectQuery(`FROM signoz_metadata\.distributed_column_evolution_metadata`).
WithArgs(nil, nil, nil, nil, nil, nil, nil, nil).
WillReturnRows(cmock.NewRows([]cmock.ColumnType{
{Name: "signal", Type: "String"},
{Name: "column_name", Type: "String"},
{Name: "column_type", Type: "String"},
{Name: "field_context", Type: "String"},
{Name: "field_name", Type: "String"},
{Name: "version", Type: "UInt32"},
{Name: "release_time", Type: "Float64"},
}, [][]any{}))
keys, _, err := metadata.GetKeys(context.Background(), &telemetrytypes.FieldKeySelector{
Signal: telemetrytypes.SignalTraces,
FieldContext: telemetrytypes.FieldContextSpan,
@@ -247,6 +260,27 @@ func TestApplyBackwardCompatibleKeys(t *testing.T) {
}, rows))
}
// getTracesKeys / getLogsKeys both fetch evolution metadata; return an empty
// result so the existing test data flows through unchanged. Each input key
// becomes one selector contributing four bound args.
if hasTraces || hasLogs {
evoArgs := make([]any, 0, len(tt.inputKeys)*4)
for range tt.inputKeys {
evoArgs = append(evoArgs, nil, nil, nil, nil)
}
mock.ExpectQuery(`FROM signoz_metadata\.distributed_column_evolution_metadata`).
WithArgs(evoArgs...).
WillReturnRows(cmock.NewRows([]cmock.ColumnType{
{Name: "signal", Type: "String"},
{Name: "column_name", Type: "String"},
{Name: "column_type", Type: "String"},
{Name: "field_context", Type: "String"},
{Name: "field_name", Type: "String"},
{Name: "version", Type: "UInt32"},
{Name: "release_time", Type: "Float64"},
}, [][]any{}))
}
selectors := []*telemetrytypes.FieldKeySelector{}
for _, key := range tt.inputKeys {
selectors = append(selectors, &telemetrytypes.FieldKeySelector{

View File

@@ -161,7 +161,33 @@ func (c *conditionBuilder) conditionFor(
case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
var value any
switch columns[0].Type.GetType() {
column := columns[0]
if len(key.Evolutions) > 0 {
// we will use the corresponding column and its evolution entry for the query
newColumns, _, err := qbtypes.SelectEvolutionsForColumns(columns, key.Evolutions, startNs, endNs)
if err != nil {
return "", err
}
if len(newColumns) == 0 {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "no valid evolution found for field %s in the given time range", key.Name)
}
// Multiple columns means fieldExpression is a multiIf returning NULL when none match,
// so a simple null check is sufficient.
if len(newColumns) > 1 {
if operator == qbtypes.FilterOperatorExists {
return sb.IsNotNull(fieldExpression), nil
} else {
return sb.IsNull(fieldExpression), nil
}
}
// otherwise we have to pick the correct exists condition based on the column type
column = newColumns[0]
}
switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON:
if operator == qbtypes.FilterOperatorExists {
return sb.IsNotNull(fieldExpression), nil
@@ -178,7 +204,7 @@ func (c *conditionBuilder) conditionFor(
return sb.E(fieldExpression, value), nil
}
case schema.ColumnTypeEnumLowCardinality:
switch elementType := columns[0].Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
case schema.ColumnTypeEnumString:
value = ""
if operator == qbtypes.FilterOperatorExists {
@@ -202,14 +228,14 @@ func (c *conditionBuilder) conditionFor(
return sb.E(fieldExpression, value), nil
}
case schema.ColumnTypeEnumMap:
keyType := columns[0].Type.(schema.MapColumnType).KeyType
keyType := column.Type.(schema.MapColumnType).KeyType
if _, ok := keyType.(schema.LowCardinalityColumnType); !ok {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, columns[0].Type)
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, column.Type)
}
switch valueType := columns[0].Type.(schema.MapColumnType).ValueType; valueType.GetType() {
switch valueType := column.Type.(schema.MapColumnType).ValueType; valueType.GetType() {
case schema.ColumnTypeEnumString, schema.ColumnTypeEnumBool, schema.ColumnTypeEnumFloat64:
leftOperand := fmt.Sprintf("mapContains(%s, '%s')", columns[0].Name, key.Name)
leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
if key.Materialized {
leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
}
@@ -222,7 +248,7 @@ func (c *conditionBuilder) conditionFor(
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for map column type %s", valueType)
}
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", columns[0].Type)
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", column.Type)
}
}
return "", nil

View File

@@ -3,6 +3,7 @@ package telemetrytraces
import (
"context"
"testing"
"time"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -14,6 +15,7 @@ import (
func TestConditionFor(t *testing.T) {
ctx := context.Background()
mockEvolution := mockEvolutionData(time.Date(2025, 10, 26, 0, 10, 0, 0, time.UTC))
testCases := []struct {
name string
key telemetrytypes.TelemetryFieldKey
@@ -213,6 +215,7 @@ func TestConditionFor(t *testing.T) {
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: mockEvolution,
},
operator: qbtypes.FilterOperatorExists,
value: nil,
@@ -225,6 +228,7 @@ func TestConditionFor(t *testing.T) {
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: mockEvolution,
},
operator: qbtypes.FilterOperatorNotExists,
value: nil,
@@ -302,3 +306,85 @@ func TestConditionFor(t *testing.T) {
})
}
}
func TestConditionForResourceWithEvolution(t *testing.T) {
ctx := context.Background()
releaseTime := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
evolutions := mockEvolutionData(releaseTime)
testCases := []struct {
name string
key telemetrytypes.TelemetryFieldKey
operator qbtypes.FilterOperator
tsStart uint64
tsEnd uint64
expectedSQL string
}{
{
name: "Exists - window after release - JSON only",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: evolutions,
},
operator: qbtypes.FilterOperatorExists,
tsStart: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedSQL: "WHERE resource.`service.name`::String IS NOT NULL",
},
{
name: "NotExists - window after release - JSON only",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: evolutions,
},
operator: qbtypes.FilterOperatorNotExists,
tsStart: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedSQL: "WHERE resource.`service.name`::String IS NULL",
},
{
name: "Exists - window before release - map only",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: evolutions,
},
operator: qbtypes.FilterOperatorExists,
tsStart: uint64(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedSQL: "WHERE mapContains(resources_string, 'service.name') = ?",
},
{
name: "Exists - window straddles release - multiIf null check",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: evolutions,
},
operator: qbtypes.FilterOperatorExists,
tsStart: uint64(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedSQL: "WHERE multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL",
},
}
fm := NewFieldMapper()
conditionBuilder := NewConditionBuilder(fm)
for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(ctx, tc.tsStart, tc.tsEnd, &tc.key, tc.operator, nil, sb)
require.NoError(t, err)
sb.Where(cond)
sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
assert.Contains(t, sql, tc.expectedSQL)
})
}
}

View File

@@ -1,6 +1,8 @@
package telemetrytraces
import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
import (
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
var (
IntrinsicFields = map[string]telemetrytypes.TelemetryFieldKey{

View File

@@ -174,7 +174,7 @@ func (m *defaultFieldMapper) getColumn(
) ([]*schema.Column, error) {
switch key.FieldContext {
case telemetrytypes.FieldContextResource:
return []*schema.Column{indexV3Columns["resource"]}, nil
return []*schema.Column{indexV3Columns["resources_string"], indexV3Columns["resource"]}, nil
case telemetrytypes.FieldContextScope:
return []*schema.Column{}, qbtypes.ErrColumnNotFound
case telemetrytypes.FieldContextAttribute:
@@ -254,63 +254,92 @@ func (m *defaultFieldMapper) FieldFor(
if err != nil {
return "", err
}
if len(columns) != 1 {
return "", errors.Newf(errors.TypeInternal, errors.CodeInternal, "expected exactly 1 column, got %d", len(columns))
var newColumns []*schema.Column
var evolutionsEntries []*telemetrytypes.EvolutionEntry
if len(key.Evolutions) > 0 {
// we will use the corresponding column and its evolution entry for the query
newColumns, evolutionsEntries, err = qbtypes.SelectEvolutionsForColumns(columns, key.Evolutions, startNs, endNs)
if err != nil {
return "", err
}
} else {
newColumns = columns
}
column := columns[0]
switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON:
// json is only supported for resource context as of now
if key.FieldContext != telemetrytypes.FieldContextResource {
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource context fields are supported for json columns, got %s", key.FieldContext.String)
}
oldColumn := indexV3Columns["resources_string"]
oldKeyName := fmt.Sprintf("%s['%s']", oldColumn.Name, key.Name)
// have to add ::string as clickHouse throws an error :- data types Variant/Dynamic are not allowed in GROUP BY
// once clickHouse dependency is updated, we need to check if we can remove it.
if key.Materialized {
oldKeyName = telemetrytypes.FieldKeyToMaterializedColumnName(key)
oldKeyNameExists := telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
return fmt.Sprintf("multiIf(%s.`%s` IS NOT NULL, %s.`%s`::String, %s==true, %s, NULL)", column.Name, key.Name, column.Name, key.Name, oldKeyNameExists, oldKeyName), nil
} else {
return fmt.Sprintf("multiIf(%s.`%s` IS NOT NULL, %s.`%s`::String, mapContains(%s, '%s'), %s, NULL)", column.Name, key.Name, column.Name, key.Name, oldColumn.Name, key.Name, oldKeyName), nil
}
case schema.ColumnTypeEnumString,
schema.ColumnTypeEnumUInt64,
schema.ColumnTypeEnumUInt32,
schema.ColumnTypeEnumInt8,
schema.ColumnTypeEnumInt16,
schema.ColumnTypeEnumBool,
schema.ColumnTypeEnumDateTime64,
schema.ColumnTypeEnumFixedString:
return column.Name, nil
case schema.ColumnTypeEnumLowCardinality:
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
case schema.ColumnTypeEnumString:
return column.Name, nil
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "value type %s is not supported for low cardinality column type %s", elementType, column.Type)
}
case schema.ColumnTypeEnumMap:
keyType := column.Type.(schema.MapColumnType).KeyType
if _, ok := keyType.(schema.LowCardinalityColumnType); !ok {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, column.Type)
exprs := []string{}
existExpr := []string{}
for i, column := range newColumns {
// Use evolution column name if available, otherwise use the column name
columnName := column.Name
if evolutionsEntries != nil && evolutionsEntries[i] != nil {
columnName = evolutionsEntries[i].ColumnName
}
switch valueType := column.Type.(schema.MapColumnType).ValueType; valueType.GetType() {
case schema.ColumnTypeEnumString, schema.ColumnTypeEnumFloat64, schema.ColumnTypeEnumBool:
// a key could have been materialized, if so return the materialized column name
if key.Materialized {
return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON:
// json is only supported for resource context as of now
if key.FieldContext != telemetrytypes.FieldContextResource {
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource context fields are supported for json columns, got %s", key.FieldContext.String)
}
// have to add ::String as ClickHouse throws an error: data types Variant/Dynamic are not allowed in GROUP BY
// once the ClickHouse dependency is updated, check whether this cast can be removed.
exprs = append(exprs, fmt.Sprintf("%s.`%s`::String", columnName, key.Name))
existExpr = append(existExpr, fmt.Sprintf("%s.`%s` IS NOT NULL", columnName, key.Name))
case schema.ColumnTypeEnumString,
schema.ColumnTypeEnumUInt64,
schema.ColumnTypeEnumUInt32,
schema.ColumnTypeEnumInt8,
schema.ColumnTypeEnumInt16,
schema.ColumnTypeEnumBool,
schema.ColumnTypeEnumDateTime64,
schema.ColumnTypeEnumFixedString:
exprs = append(exprs, column.Name)
case schema.ColumnTypeEnumLowCardinality:
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
case schema.ColumnTypeEnumString:
exprs = append(exprs, column.Name)
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "value type %s is not supported for low cardinality column type %s", elementType, column.Type)
}
case schema.ColumnTypeEnumMap:
keyType := column.Type.(schema.MapColumnType).KeyType
if _, ok := keyType.(schema.LowCardinalityColumnType); !ok {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, column.Type)
}
switch valueType := column.Type.(schema.MapColumnType).ValueType; valueType.GetType() {
case schema.ColumnTypeEnumString, schema.ColumnTypeEnumFloat64, schema.ColumnTypeEnumBool:
// a key could have been materialized, if so return the materialized column name
if key.Materialized {
exprs = append(exprs, telemetrytypes.FieldKeyToMaterializedColumnName(key))
existExpr = append(existExpr, fmt.Sprintf("%s==true", telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)))
} else {
exprs = append(exprs, fmt.Sprintf("%s['%s']", columnName, key.Name))
existExpr = append(existExpr, fmt.Sprintf("mapContains(%s, '%s')", columnName, key.Name))
}
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "value type %s is not supported for map column type %s", valueType, column.Type)
}
return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "value type %s is not supported for map column type %s", valueType, column.Type)
}
}
if len(exprs) == 1 {
return exprs[0], nil
} else if len(exprs) > 1 {
// Ensure existExpr has the same length as exprs
if len(existExpr) != len(exprs) {
return "", errors.New(errors.TypeInternal, errors.CodeInternal, "length of exist exprs doesn't match to that of exprs")
}
finalExprs := []string{}
for i, expr := range exprs {
finalExprs = append(finalExprs, fmt.Sprintf("%s, %s", existExpr[i], expr))
}
return "multiIf(" + strings.Join(finalExprs, ", ") + ", NULL)", nil
}
// should not reach here
return column.Name, nil
return columns[0].Name, nil
}
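For illustration, this folding yields expressions like the one asserted in the tests below: for a resource field whose selected evolutions span both the legacy map column and the newer JSON column, each surviving evolution contributes an (exists, value) pair, newest release first:

multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)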
// ColumnExpressionFor returns the column expression for the given field

View File

@@ -3,6 +3,7 @@ package telemetrytraces
import (
"context"
"testing"
"time"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -13,6 +14,7 @@ import (
func TestGetFieldKeyName(t *testing.T) {
ctx := context.Background()
mockEvolution := mockEvolutionData(time.Date(2024, 6, 2, 0, 0, 0, 0, time.UTC))
testCases := []struct {
name string
key telemetrytypes.TelemetryFieldKey
@@ -63,6 +65,7 @@ func TestGetFieldKeyName(t *testing.T) {
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
Evolutions: mockEvolution,
},
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
expectedError: nil,
@@ -74,6 +77,7 @@ func TestGetFieldKeyName(t *testing.T) {
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Materialized: true,
Evolutions: mockEvolution,
},
expectedResult: "multiIf(resource.`deployment.environment` IS NOT NULL, resource.`deployment.environment`::String, `resource_string_deployment$$environment_exists`==true, `resource_string_deployment$$environment`, NULL)",
expectedError: nil,
@@ -92,7 +96,7 @@ func TestGetFieldKeyName(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fm := NewFieldMapper()
result, err := fm.FieldFor(ctx, 0, 0, &tc.key)
result, err := fm.FieldFor(ctx, uint64(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()), uint64(time.Date(2024, 6, 5, 0, 0, 0, 0, time.UTC).UnixNano()), &tc.key)
if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err)
@@ -103,3 +107,86 @@ func TestGetFieldKeyName(t *testing.T) {
})
}
}
func TestFieldForResourceWithEvolution(t *testing.T) {
ctx := context.Background()
releaseTime := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
evolutions := mockEvolutionData(releaseTime)
testCases := []struct {
name string
key telemetrytypes.TelemetryFieldKey
tsStart uint64
tsEnd uint64
expectedResult string
}{
{
name: "Window straddles release - both columns",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
Evolutions: evolutions,
},
tsStart: uint64(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
},
{
name: "Window fully after release - JSON column only",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
Evolutions: evolutions,
},
tsStart: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedResult: "resource.`service.name`::String",
},
{
name: "Window fully before release - map column only",
key: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
Evolutions: evolutions,
},
tsStart: uint64(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedResult: "resources_string['service.name']",
},
{
name: "Window fully after release - materialized resource",
key: telemetrytypes.TelemetryFieldKey{
Name: "deployment.environment",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Materialized: true,
Evolutions: evolutions,
},
tsStart: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 7, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedResult: "resource.`deployment.environment`::String",
},
{
name: "Window straddles release - materialized resource",
key: telemetrytypes.TelemetryFieldKey{
Name: "deployment.environment",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Materialized: true,
Evolutions: evolutions,
},
tsStart: uint64(time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2025, 6, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedResult: "multiIf(resource.`deployment.environment` IS NOT NULL, resource.`deployment.environment`::String, `resource_string_deployment$$environment_exists`==true, `resource_string_deployment$$environment`, NULL)",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fm := NewFieldMapper()
result, err := fm.FieldFor(ctx, tc.tsStart, tc.tsEnd, &tc.key)
require.NoError(t, err)
assert.Equal(t, tc.expectedResult, result)
})
}
}

View File

@@ -82,13 +82,6 @@ func (b *traceQueryStatementBuilder) Build(
start = querybuilder.ToNanoSecs(start)
end = querybuilder.ToNanoSecs(end)
keySelectors := getKeySelectors(query)
keys, _, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
/*
Adding a tech debt note here:
This piece of code is a hot fix and should be removed once we close issue: engineering-pod/issues/3622
@@ -124,6 +117,14 @@ func (b *traceQueryStatementBuilder) Build(
-------------------------------- End of tech debt ----------------------------
*/
// Since we modify the selectFields above, they might include keys that need evolutions, so we fetch the keys only after that.
keySelectors := getKeySelectors(query)
keys, _, err := b.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
query = b.adjustKeys(ctx, keys, query, requestType)
// Create SQL builder

View File

@@ -16,6 +16,9 @@ import (
)
func TestStatementBuilder(t *testing.T) {
// releaseTime is chosen so it lands inside the standard [1747947419000, 1747983448000]ms
// test window, keeping the multiIf SQL form for resource fields.
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -355,7 +358,7 @@ func TestStatementBuilder(t *testing.T) {
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)
@@ -394,6 +397,7 @@ func TestStatementBuilder(t *testing.T) {
}
func TestStatementBuilderListQuery(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -650,7 +654,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)
@@ -683,6 +687,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
}
func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -703,6 +708,15 @@ func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
"service.name": {
{
Name: "service.name",
Signal: telemetrytypes.SignalTraces,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: mockEvolutionData(time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)),
},
},
},
query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Signal: telemetrytypes.SignalTraces,
@@ -728,6 +742,15 @@ func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
"service.name": {
{
Name: "service.name",
Signal: telemetrytypes.SignalTraces,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
Evolutions: mockEvolutionData(time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)),
},
},
},
query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Signal: telemetrytypes.SignalTraces,
@@ -758,7 +781,7 @@ func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = c.keysMap
if mockMetadataStore.KeysMap == nil {
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
}
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)
@@ -788,7 +811,90 @@ func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
}
}
func TestStatementBuilderGroupByResourceEvolution(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
startMs uint64
endMs uint64
expected qbtypes.Statement
}{
{
name: "window straddles release - both JSON and map branches",
startMs: 1747947419000, // 2025-05-22 21:56:59 UTC, ~3m before release
endMs: 1747983448000, // 2025-05-23 07:57:28 UTC, ~10h after release
expected: qbtypes.Statement{
Query: "WITH __limit_cte AS (SELECT toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10, "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448)},
},
},
{
name: "window after release - JSON column only",
startMs: 1747960000000, // 2025-05-23 00:26:40 UTC, ~2.5h after release
endMs: 1747983448000, // 2025-05-23 07:57:28 UTC
expected: qbtypes.Statement{
Query: "WITH __limit_cte AS (SELECT toString(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
Args: []any{"1747960000000000000", "1747983448000000000", uint64(1747958200), uint64(1747983448), 10, "1747960000000000000", "1747983448000000000", uint64(1747958200), uint64(1747983448)},
},
},
{
name: "window before release - map column only",
startMs: 1747900000000, // 2025-05-22 08:26:40 UTC, ~13.5h before release
endMs: 1747947000000, // 2025-05-22 21:50:00 UTC, ~10m before release
expected: qbtypes.Statement{
Query: "WITH __limit_cte AS (SELECT toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(timestamp, INTERVAL 30 SECOND) AS ts, toString(multiIf(mapContains(resources_string, 'service.name') = ?, resources_string['service.name'], NULL)) AS `service.name`, count() AS __result_0 FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
Args: []any{true, "1747900000000000000", "1747947000000000000", uint64(1747898200), uint64(1747947000), 10, true, "1747900000000000000", "1747947000000000000", uint64(1747898200), uint64(1747947000)},
},
},
}
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)
statementBuilder := NewTraceQueryStatementBuilder(
instrumentationtest.New().ToProviderSettings(),
mockMetadataStore,
fm,
cb,
aggExprRewriter,
nil,
fl,
)
query := qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Signal: telemetrytypes.SignalTraces,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.TraceAggregation{
{Expression: "count()"},
},
Filter: &qbtypes.Filter{},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), c.startMs, c.endMs, qbtypes.RequestTypeTimeSeries, query, nil)
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
})
}
}
func TestStatementBuilderTraceQuery(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -911,7 +1017,7 @@ func TestStatementBuilderTraceQuery(t *testing.T) {
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)
@@ -944,6 +1050,7 @@ func TestStatementBuilderTraceQuery(t *testing.T) {
}
func TestAdjustKey(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
inputKey telemetrytypes.TelemetryFieldKey
@@ -957,7 +1064,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextUnspecified,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: IntrinsicFields["trace_id"],
},
{
@@ -967,7 +1074,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextBody, // incorrect context
FieldDataType: telemetrytypes.FieldDataTypeInt64,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "duration_nano",
FieldContext: telemetrytypes.FieldContextSpan, // should be corrected
@@ -981,7 +1088,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextSpan, // correct context
FieldDataType: telemetrytypes.FieldDataTypeInt64,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "duration_nano",
FieldContext: telemetrytypes.FieldContextSpan, // should be corrected
@@ -995,8 +1102,8 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextUnspecified,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
expectedKey: *buildCompleteFieldKeyMap()["service.name"][0],
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: *buildCompleteFieldKeyMap(releaseTime)["service.name"][0],
},
{
name: "single matching key with context specified - override",
@@ -1005,8 +1112,8 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
expectedKey: *buildCompleteFieldKeyMap()["cart.items_count"][0],
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: *buildCompleteFieldKeyMap(releaseTime)["cart.items_count"][0],
},
{
name: "multiple matching keys - all materialized",
@@ -1043,7 +1150,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextUnspecified,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "mixed.materialization.key",
FieldDataType: telemetrytypes.FieldDataTypeString,
@@ -1057,7 +1164,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "mixed.materialization.key",
FieldContext: telemetrytypes.FieldContextAttribute,
@@ -1072,7 +1179,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextUnspecified,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "unknown.field",
Materialized: false,
@@ -1085,7 +1192,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextAttribute,
@@ -1100,7 +1207,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextUnspecified,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "cart.items_count",
FieldContext: telemetrytypes.FieldContextAttribute,
@@ -1115,7 +1222,7 @@ func TestAdjustKey(t *testing.T) {
FieldContext: telemetrytypes.FieldContextUnspecified,
FieldDataType: telemetrytypes.FieldDataTypeUnspecified,
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedKey: telemetrytypes.TelemetryFieldKey{
Name: "user.id",
FieldContext: telemetrytypes.FieldContextAttribute,
@@ -1158,6 +1265,7 @@ func TestAdjustKey(t *testing.T) {
}
func TestAdjustKeys(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]
@@ -1183,7 +1291,7 @@ func TestAdjustKeys(t *testing.T) {
},
},
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedSelectFields: []telemetrytypes.TelemetryFieldKey{
{
Name: "service.name",
@@ -1220,7 +1328,7 @@ func TestAdjustKeys(t *testing.T) {
},
},
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedGroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
@@ -1267,7 +1375,7 @@ func TestAdjustKeys(t *testing.T) {
},
},
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedOrder: []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{
@@ -1326,7 +1434,7 @@ func TestAdjustKeys(t *testing.T) {
},
},
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
expectedSelectFields: []telemetrytypes.TelemetryFieldKey{
{
Name: "trace_id",
@@ -1381,7 +1489,7 @@ func TestAdjustKeys(t *testing.T) {
},
},
},
keysMap: buildCompleteFieldKeyMap(),
keysMap: buildCompleteFieldKeyMap(releaseTime),
// After alias adjustment, name becomes "span.duration" with FieldContextUnspecified
// "span.duration" is not in keysMap, so context stays unspecified
expectedOrder: []qbtypes.OrderBy{

View File

@@ -1,10 +1,12 @@
package telemetrytraces
import (
"time"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey {
func buildCompleteFieldKeyMap(releaseTime time.Time) map[string][]*telemetrytypes.TelemetryFieldKey {
keysMap := map[string][]*telemetrytypes.TelemetryFieldKey{
"service.name": {
{
@@ -115,7 +117,33 @@ func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey {
for _, keys := range keysMap {
for _, key := range keys {
key.Signal = telemetrytypes.SignalTraces
if key.FieldContext == telemetrytypes.FieldContextResource {
key.Evolutions = mockEvolutionData(releaseTime)
}
}
}
return keysMap
}
// mockEvolutionData returns the canonical resource-column evolution timeline used in tests:
// the legacy resources_string map at epoch 0 and the JSON resource column released at releaseTime.
func mockEvolutionData(releaseTime time.Time) []*telemetrytypes.EvolutionEntry {
return []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalTraces,
ColumnName: "resources_string",
FieldContext: telemetrytypes.FieldContextResource,
ColumnType: "Map(LowCardinality(String), String)",
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalTraces,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: releaseTime,
},
}
}

View File

@@ -398,21 +398,27 @@ func (b *traceOperatorCTEBuilder) buildNotCTE(leftCTE, rightCTE string) (string,
}
func (b *traceOperatorCTEBuilder) buildFinalQuery(ctx context.Context, selectFromCTE string, requestType qbtypes.RequestType) (*qbtypes.Statement, error) {
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, b.getKeySelectors())
if err != nil {
return nil, err
}
b.adjustKeys(keys)
switch requestType {
case qbtypes.RequestTypeRaw:
return b.buildListQuery(ctx, selectFromCTE)
return b.buildListQuery(ctx, selectFromCTE, keys)
case qbtypes.RequestTypeTimeSeries:
return b.buildTimeSeriesQuery(ctx, selectFromCTE)
return b.buildTimeSeriesQuery(ctx, selectFromCTE, keys)
case qbtypes.RequestTypeTrace:
return b.buildTraceQuery(ctx, selectFromCTE)
return b.buildTraceQuery(ctx, selectFromCTE, keys)
case qbtypes.RequestTypeScalar:
return b.buildScalarQuery(ctx, selectFromCTE)
return b.buildScalarQuery(ctx, selectFromCTE, keys)
default:
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported request type: %s", requestType)
}
}
func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFromCTE string, keys map[string][]*telemetrytypes.TelemetryFieldKey) (*qbtypes.Statement, error) {
sb := sqlbuilder.NewSelectBuilder()
// Select core fields
@@ -434,22 +440,6 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
"parent_span_id": true,
}
// Get keys for selectFields
keySelectors := b.getKeySelectors()
for _, field := range b.operator.SelectFields {
keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
Name: field.Name,
Signal: telemetrytypes.SignalTraces,
FieldContext: field.FieldContext,
FieldDataType: field.FieldDataType,
})
}
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
// Add selectFields using ColumnExpressionFor since we now have all base table columns
for _, field := range b.operator.SelectFields {
if selectedFields[field.Name] {
@@ -526,6 +516,15 @@ func (b *traceOperatorCTEBuilder) getKeySelectors() []*telemetrytypes.FieldKeySe
})
}
for _, field := range b.operator.SelectFields {
keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
Name: field.Name,
Signal: telemetrytypes.SignalTraces,
FieldContext: field.FieldContext,
FieldDataType: field.FieldDataType,
})
}
for i := range keySelectors {
keySelectors[i].Signal = telemetrytypes.SignalTraces
}
@@ -533,7 +532,7 @@ func (b *traceOperatorCTEBuilder) getKeySelectors() []*telemetrytypes.FieldKeySe
return keySelectors
}
func (b *traceOperatorCTEBuilder) buildTimeSeriesQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
func (b *traceOperatorCTEBuilder) buildTimeSeriesQuery(ctx context.Context, selectFromCTE string, keys map[string][]*telemetrytypes.TelemetryFieldKey) (*qbtypes.Statement, error) {
sb := sqlbuilder.NewSelectBuilder()
sb.Select(fmt.Sprintf(
@@ -541,12 +540,6 @@ func (b *traceOperatorCTEBuilder) buildTimeSeriesQuery(ctx context.Context, sele
int64(b.operator.StepInterval.Seconds()),
))
keySelectors := b.getKeySelectors()
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
var allGroupByArgs []any
for _, gb := range b.operator.GroupBy {
@@ -625,8 +618,7 @@ func (b *traceOperatorCTEBuilder) buildTimeSeriesQuery(ctx context.Context, sele
combinedArgs := append(allGroupByArgs, allAggChArgs...)
// Add HAVING clause if specified
err = b.addHavingClause(sb)
if err != nil {
if err := b.addHavingClause(sb); err != nil {
return nil, err
}
@@ -653,17 +645,11 @@ func (b *traceOperatorCTEBuilder) buildTraceSummaryCTE(selectFromCTE string) {
b.addCTE("trace_summary", sql, args, []string{"all_spans", selectFromCTE})
}
func (b *traceOperatorCTEBuilder) buildTraceQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
func (b *traceOperatorCTEBuilder) buildTraceQuery(ctx context.Context, selectFromCTE string, keys map[string][]*telemetrytypes.TelemetryFieldKey) (*qbtypes.Statement, error) {
b.buildTraceSummaryCTE(selectFromCTE)
sb := sqlbuilder.NewSelectBuilder()
keySelectors := b.getKeySelectors()
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
var allGroupByArgs []any
for _, gb := range b.operator.GroupBy {
@@ -745,8 +731,7 @@ func (b *traceOperatorCTEBuilder) buildTraceQuery(ctx context.Context, selectFro
sb.GroupBy(groupByKeys...)
}
err = b.addHavingClause(sb)
if err != nil {
if err := b.addHavingClause(sb); err != nil {
return nil, err
}
@@ -802,15 +787,9 @@ func (b *traceOperatorCTEBuilder) buildTraceQuery(ctx context.Context, selectFro
}, nil
}
func (b *traceOperatorCTEBuilder) buildScalarQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
func (b *traceOperatorCTEBuilder) buildScalarQuery(ctx context.Context, selectFromCTE string, keys map[string][]*telemetrytypes.TelemetryFieldKey) (*qbtypes.Statement, error) {
sb := sqlbuilder.NewSelectBuilder()
keySelectors := b.getKeySelectors()
keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
if err != nil {
return nil, err
}
var allGroupByArgs []any
for _, gb := range b.operator.GroupBy {
@@ -892,8 +871,7 @@ func (b *traceOperatorCTEBuilder) buildScalarQuery(ctx context.Context, selectFr
combinedArgs := append(allGroupByArgs, allAggChArgs...)
// Add HAVING clause if specified
err = b.addHavingClause(sb)
if err != nil {
if err := b.addHavingClause(sb); err != nil {
return nil, err
}
@@ -936,3 +914,16 @@ func (b *traceOperatorCTEBuilder) aggOrderBy(k qbtypes.OrderBy) (int, bool) {
}
return 0, false
}
func (b *traceOperatorCTEBuilder) adjustKeys(keys map[string][]*telemetrytypes.TelemetryFieldKey) {
// todo: this needs to be updated w.r.t trace statement builder.
for i := range b.operator.SelectFields {
querybuilder.AdjustKey(&b.operator.SelectFields[i], keys, nil)
}
for i := range b.operator.GroupBy {
querybuilder.AdjustKey(&b.operator.GroupBy[i].TelemetryFieldKey, keys, nil)
}
for i := range b.operator.Order {
querybuilder.AdjustKey(&b.operator.Order[i].Key.TelemetryFieldKey, keys, nil)
}
}

View File

@@ -15,6 +15,7 @@ import (
)
func TestTraceOperatorStatementBuilder(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
requestType qbtypes.RequestType
@@ -390,7 +391,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)
@@ -443,6 +444,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
}
func TestTraceOperatorStatementBuilderErrors(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
cases := []struct {
name string
operator qbtypes.QueryBuilderTraceOperator
@@ -506,7 +508,7 @@ func TestTraceOperatorStatementBuilderErrors(t *testing.T) {
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
fl := flaggertest.New(t)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil, fl)

View File

@@ -4,6 +4,7 @@ import (
"context"
"strings"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/querybuilder"
@@ -16,12 +17,13 @@ import (
)
func TestTraceTimeRangeOptimization(t *testing.T) {
releaseTime := time.Date(2025, 5, 22, 22, 0, 0, 0, time.UTC)
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap(releaseTime)
mockMetadataStore.KeysMap["trace_id"] = []*telemetrytypes.TelemetryFieldKey{{
Name: "trace_id",
FieldContext: telemetrytypes.FieldContextSpan,

View File

@@ -0,0 +1,119 @@
package querybuildertypesv5
import (
"slices"
"sort"
"strconv"
"time"
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
// SelectEvolutionsForColumns selects the appropriate evolution entries for each column based on the time range.
// Logic:
// - Finds the latest base evolution (<= tsStartTime) across ALL columns
// - Rejects all evolutions before this latest base evolution
// - For duplicate evolutions it keeps the oldest one (first by ReleaseTime)
// - For each column, includes its evolution if it's >= the latest base evolution and < tsEndTime
// - Results are sorted by ReleaseTime descending (newest first)
func SelectEvolutionsForColumns(columns []*schema.Column, evolutions []*telemetrytypes.EvolutionEntry, tsStart, tsEnd uint64) ([]*schema.Column, []*telemetrytypes.EvolutionEntry, error) {
sortedEvolutions := make([]*telemetrytypes.EvolutionEntry, len(evolutions))
copy(sortedEvolutions, evolutions)
// sort the evolutions by ReleaseTime ascending
sort.Slice(sortedEvolutions, func(i, j int) bool {
return sortedEvolutions[i].ReleaseTime.Before(sortedEvolutions[j].ReleaseTime)
})
tsStartTime := time.Unix(0, int64(tsStart))
tsEndTime := time.Unix(0, int64(tsEnd))
// Build evolution map: column name -> evolution
evolutionMap := make(map[string]*telemetrytypes.EvolutionEntry)
for _, evolution := range sortedEvolutions {
if _, exists := evolutionMap[evolution.ColumnName+":"+evolution.FieldName+":"+strconv.Itoa(int(evolution.Version))]; exists {
// if there is a duplicate, we keep only the oldest one (the slice is sorted ascending by ReleaseTime)
continue
}
evolutionMap[evolution.ColumnName+":"+evolution.FieldName+":"+strconv.Itoa(int(evolution.Version))] = evolution
}
// Find the latest base evolution (<= tsStartTime) across ALL columns
// Evolutions are sorted, so we can break early
var latestBaseEvolutionAcrossAll *telemetrytypes.EvolutionEntry
for _, evolution := range sortedEvolutions {
if evolution.ReleaseTime.After(tsStartTime) {
break
}
latestBaseEvolutionAcrossAll = evolution
}
// We shouldn't reach this; it means there is something wrong with the evolutions data
if latestBaseEvolutionAcrossAll == nil {
return nil, nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "no base evolution found for columns %v", columns)
}
columnLookUpMap := make(map[string]*schema.Column)
for _, column := range columns {
columnLookUpMap[column.Name] = column
}
// Collect column-evolution pairs
type colEvoPair struct {
column *schema.Column
evolution *telemetrytypes.EvolutionEntry
}
pairs := []colEvoPair{}
for _, evolution := range evolutionMap {
// Reject evolutions before the latest base evolution
if evolution.ReleaseTime.Before(latestBaseEvolutionAcrossAll.ReleaseTime) {
continue
}
// skip evolutions at or after tsEndTime
if evolution.ReleaseTime.After(tsEndTime) || evolution.ReleaseTime.Equal(tsEndTime) {
continue
}
if _, exists := columnLookUpMap[evolution.ColumnName]; !exists {
return nil, nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "evolution column %s not found in columns %v", evolution.ColumnName, columns)
}
pairs = append(pairs, colEvoPair{columnLookUpMap[evolution.ColumnName], evolution})
}
// If no pairs found, fall back to latestBaseEvolutionAcrossAll for matching columns
if len(pairs) == 0 {
for _, column := range columns {
// Use latestBaseEvolutionAcrossAll if this column matches its column name
if column.Name == latestBaseEvolutionAcrossAll.ColumnName {
pairs = append(pairs, colEvoPair{column, latestBaseEvolutionAcrossAll})
}
}
}
// Sort by ReleaseTime descending (newest first)
slices.SortFunc(pairs, func(a, b colEvoPair) int {
if a.evolution.ReleaseTime.After(b.evolution.ReleaseTime) {
return -1
}
if a.evolution.ReleaseTime.Before(b.evolution.ReleaseTime) {
return 1
}
return 0
})
// Extract results
newColumns := make([]*schema.Column, len(pairs))
evolutionsEntries := make([]*telemetrytypes.EvolutionEntry, len(pairs))
for i, pair := range pairs {
newColumns[i] = pair.column
evolutionsEntries[i] = pair.evolution
}
return newColumns, evolutionsEntries, nil
}
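A minimal usage sketch under the same assumptions as the tests below (the column variables, the "__all__" field name, and releaseTime all mirror that test data; this is not production wiring):

// Select the evolutions visible in a window that straddles the JSON release.
releaseTime := time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC)
cols := []*schema.Column{resources_string, resource} // declared in the test file below
evols := []*telemetrytypes.EvolutionEntry{
	{ColumnName: "resources_string", FieldName: "__all__", ReleaseTime: time.Unix(0, 0)},
	{ColumnName: "resource", FieldName: "__all__", ReleaseTime: releaseTime},
}
start := uint64(releaseTime.Add(-time.Hour).UnixNano())
end := uint64(releaseTime.Add(time.Hour).UnixNano())
newCols, entries, err := SelectEvolutionsForColumns(cols, evols, start, end)
// err == nil; newCols and entries hold both columns sorted newest release first:
// [resource, resources_string]. A window starting at or after releaseTime would
// keep only the JSON "resource" column.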

View File

@@ -0,0 +1,414 @@
package querybuildertypesv5
import (
"testing"
"time"
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
LogsV2BodyV2Column = "body_v2"
LogsV2BodyPromotedColumn = "body_promoted"
)
var (
resources_string = &schema.Column{Name: "resources_string", Type: schema.MapColumnType{
KeyType: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
ValueType: schema.ColumnTypeString,
}}
resource = &schema.Column{Name: "resource", Type: schema.JSONColumnType{}}
attributes_string = &schema.Column{Name: "attributes_string", Type: schema.MapColumnType{
KeyType: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
ValueType: schema.ColumnTypeString,
}}
body_v2 = &schema.Column{Name: LogsV2BodyV2Column, Type: schema.JSONColumnType{}}
body_promoted = &schema.Column{Name: LogsV2BodyPromotedColumn, Type: schema.JSONColumnType{}}
)
func TestSelectEvolutionsForColumns(t *testing.T) {
testCases := []struct {
name string
columns []*schema.Column
evolutions []*telemetrytypes.EvolutionEntry
tsStart uint64
tsEnd uint64
expectedColumns []string // column names
expectedEvols []string // evolution column names
expectedError bool
errorStr string
}{
{
name: "New evolutions at tsStartTime - should include latest evolution",
columns: []*schema.Column{
resources_string,
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 30, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource"},
expectedEvols: []string{"resource"},
},
{
name: "New evolutions after tsStartTime but less than tsEndTime - should include both",
columns: []*schema.Column{
resources_string,
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource", "resources_string"}, // sorted by ReleaseTime desc
expectedEvols: []string{"resource", "resources_string"},
},
{
name: "Columns without matching evolutions - should exclude them",
columns: []*schema.Column{
resources_string,
resource, // no evolution for this
attributes_string, // no evolution for this
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "New evolutions at tsEndTime - should not include new evolution",
columns: []*schema.Column{
resources_string,
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 30, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 30, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "New evolutions after tsEndTime - should exclude new",
columns: []*schema.Column{
resources_string,
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "Empty columns array",
columns: []*schema.Column{},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{},
expectedEvols: []string{},
expectedError: true,
errorStr: "column resources_string not found",
},
{
name: "Duplicate evolutions - should use first encountered (oldest if sorted)",
columns: []*schema.Column{
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 20, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource"},
expectedEvols: []string{"resource"}, // should use first one (older)
},
{
name: "Genuine Duplicate evolutions with new version- should consider both",
columns: []*schema.Column{
resources_string,
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 2,
ReleaseTime: time.Date(2024, 1, 20, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 1, 16, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string", "resource"},
expectedEvols: []string{"resources_string", "resource"}, // both kept: the versions differ, so they are not duplicates
},
{
name: "Evolution exactly at tsEndTime",
columns: []*schema.Column{
resources_string,
resource,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC), // exactly at tsEnd
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"}, // resource excluded because its ReleaseTime equals tsEnd
expectedEvols: []string{"resources_string"},
},
{
name: "Single evolution after tsStartTime - JSON body",
columns: []*schema.Column{
body_v2,
body_promoted,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyV2Column,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyPromotedColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{LogsV2BodyPromotedColumn, LogsV2BodyV2Column}, // sorted by ReleaseTime desc (newest first)
expectedEvols: []string{LogsV2BodyPromotedColumn, LogsV2BodyV2Column},
},
{
name: "No evolution after tsStartTime - JSON body",
columns: []*schema.Column{
body_v2,
body_promoted,
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyV2Column,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyPromotedColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{LogsV2BodyPromotedColumn},
expectedEvols: []string{LogsV2BodyPromotedColumn},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
resultColumns, resultEvols, err := SelectEvolutionsForColumns(tc.columns, tc.evolutions, tc.tsStart, tc.tsEnd)
if tc.expectedError {
assert.Contains(t, err.Error(), tc.errorStr)
} else {
require.NoError(t, err)
assert.Equal(t, len(tc.expectedColumns), len(resultColumns), "column count mismatch")
assert.Equal(t, len(tc.expectedEvols), len(resultEvols), "evolution count mismatch")
resultColumnNames := make([]string, len(resultColumns))
for i, col := range resultColumns {
resultColumnNames[i] = col.Name
}
resultEvolNames := make([]string, len(resultEvols))
for i, evol := range resultEvols {
resultEvolNames[i] = evol.ColumnName
}
for i := range tc.expectedColumns {
assert.Equal(t, resultColumnNames[i], tc.expectedColumns[i], "expected column missing: "+tc.expectedColumns[i])
}
for i := range tc.expectedEvols {
assert.Equal(t, resultEvolNames[i], tc.expectedEvols[i], "expected evolution missing: "+tc.expectedEvols[i])
}
// Verify sorting: should be descending by ReleaseTime
for i := 0; i < len(resultEvols)-1; i++ {
assert.True(t, !resultEvols[i].ReleaseTime.Before(resultEvols[i+1].ReleaseTime),
"evolutions should be sorted descending by ReleaseTime")
}
}
})
}
}

View File

@@ -1,12 +0,0 @@
package retentiontypes
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Store interface {
// ListTTLSettingsByTableNameAndBeforeCreatedAt returns successful TTL settings before the given timestamp.
ListTTLSettingsByTableNameAndBeforeCreatedAt(ctx context.Context, orgID valuer.UUID, tableName string, beforeMs int64) ([]*TTLSetting, error)
}

View File

@@ -1,141 +1,10 @@
package retentiontypes
import (
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/uptrace/bun"
)
const secondsPerDay = 24 * 60 * 60
const (
DefaultLogsRetentionDays = 15
DefaultMetricsRetentionDays = 30
DefaultTracesRetentionDays = 15
)
const (
TraceTTL = "traces"
MetricsTTL = "metrics"
LogsTTL = "logs"
)
const (
TTLSettingStatusPending = "pending"
TTLSettingStatusFailed = "failed"
TTLSettingStatusSuccess = "success"
)
// RetentionPolicySegment is a half-open time range using one retention policy.
type RetentionPolicySegment struct {
StartMs int64
EndMs int64
Rules []CustomRetentionRule
DefaultDays int
}
// NewRetentionPolicySegment creates a retention policy segment for a half-open time range.
func NewRetentionPolicySegment(startMs int64, endMs int64, rules []CustomRetentionRule, defaultDays int) *RetentionPolicySegment {
return &RetentionPolicySegment{
StartMs: startMs,
EndMs: endMs,
Rules: rules,
DefaultDays: defaultDays,
}
}
// BuildRetentionPolicySegmentsFromRows converts successful TTL settings into retention policy segments.
func BuildRetentionPolicySegmentsFromRows(rows []*TTLSetting, fallbackDefaultDays int, startMs, endMs int64) ([]*RetentionPolicySegment, error) {
if startMs >= endMs {
return nil, nil
}
var activeAtStart *TTLSetting
inWindow := make([]*TTLSetting, 0, len(rows))
for _, row := range rows {
rowMs := row.CreatedAt.UnixMilli()
if rowMs <= startMs {
activeAtStart = row
continue
}
if rowMs >= endMs {
continue
}
inWindow = append(inWindow, row)
}
activeRules, activeDefault, err := parseTTLSetting(activeAtStart, fallbackDefaultDays)
if err != nil {
return nil, err
}
segments := make([]*RetentionPolicySegment, 0, len(inWindow)+1)
cursor := startMs
for _, row := range inWindow {
rowMs := row.CreatedAt.UnixMilli()
if rowMs <= cursor {
activeRules, activeDefault, err = parseTTLSetting(row, fallbackDefaultDays)
if err != nil {
return nil, err
}
continue
}
segments = append(segments, NewRetentionPolicySegment(cursor, rowMs, activeRules, activeDefault))
cursor = rowMs
activeRules, activeDefault, err = parseTTLSetting(row, fallbackDefaultDays)
if err != nil {
return nil, err
}
}
if cursor < endMs {
segments = append(segments, NewRetentionPolicySegment(cursor, endMs, activeRules, activeDefault))
}
return segments, nil
}
func parseTTLSetting(row *TTLSetting, fallbackDefaultDays int) ([]CustomRetentionRule, int, error) {
if row == nil {
return nil, fallbackDefaultDays, nil
}
defaultDays := row.TTL
if row.Condition == "" {
defaultDays = (row.TTL + secondsPerDay - 1) / secondsPerDay
}
if defaultDays <= 0 {
defaultDays = fallbackDefaultDays
}
if row.Condition == "" {
return nil, defaultDays, nil
}
var rules []CustomRetentionRule
if err := json.Unmarshal([]byte(row.Condition), &rules); err != nil {
return nil, 0, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "parse ttl_setting condition for row %q", row.ID.StringValue())
}
return rules, defaultDays, nil
}
// CustomRetentionRule is one custom retention rule as stored in ttl_setting.condition.
// Rules are evaluated in declaration order; the first matching rule wins.
type CustomRetentionRule struct {
Filters []FilterCondition `json:"conditions"`
TTLDays int `json:"ttlDays"`
}
// FilterCondition is one label-key, allowed-values condition inside a retention rule.
type FilterCondition struct {
Key string `json:"key"`
Values []string `json:"values"`
}
type TTLSetting struct {
bun.BaseModel `bun:"table:ttl_setting"`
types.Identifiable
@@ -148,73 +17,3 @@ type TTLSetting struct {
OrgID string `json:"-" bun:"org_id,notnull"`
Condition string `bun:"condition,type:text"`
}
type TTLParams struct {
Type string
ColdStorageVolume string
ToColdStorageDuration int64
DelDuration int64
}
type CustomRetentionTTLParams struct {
Type string `json:"type"`
DefaultTTLDays int `json:"defaultTTLDays"`
TTLConditions []CustomRetentionRule `json:"ttlConditions"`
ColdStorageVolume string `json:"coldStorageVolume,omitempty"`
ToColdStorageDurationDays int64 `json:"coldStorageDurationDays,omitempty"`
}
type GetTTLParams struct {
Type string
}
type GetCustomRetentionTTLResponse struct {
Version string `json:"version"`
Status string `json:"status"`
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
DefaultTTLDays int `json:"default_ttl_days,omitempty"`
TTLConditions []CustomRetentionRule `json:"ttl_conditions,omitempty"`
ColdStorageVolume string `json:"cold_storage_volume,omitempty"`
ColdStorageTTLDays int `json:"cold_storage_ttl_days,omitempty"`
}
type CustomRetentionTTLResponse struct {
Message string `json:"message"`
}
type TTLStatusItem struct {
Id int `json:"id" db:"id"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
TableName string `json:"table_name" db:"table_name"`
TTL int `json:"ttl" db:"ttl"`
Status string `json:"status" db:"status"`
ColdStorageTtl int `json:"cold_storage_ttl" db:"cold_storage_ttl"`
}
type SetTTLResponseItem struct {
Message string `json:"message"`
}
type DBResponseTTL struct {
EngineFull string `ch:"engine_full"`
}
type GetTTLResponseItem struct {
MetricsTime int `json:"metrics_ttl_duration_hrs,omitempty"`
MetricsMoveTime int `json:"metrics_move_ttl_duration_hrs,omitempty"`
TracesTime int `json:"traces_ttl_duration_hrs,omitempty"`
TracesMoveTime int `json:"traces_move_ttl_duration_hrs,omitempty"`
LogsTime int `json:"logs_ttl_duration_hrs,omitempty"`
LogsMoveTime int `json:"logs_move_ttl_duration_hrs,omitempty"`
ExpectedMetricsTime int `json:"expected_metrics_ttl_duration_hrs,omitempty"`
ExpectedMetricsMoveTime int `json:"expected_metrics_move_ttl_duration_hrs,omitempty"`
ExpectedTracesTime int `json:"expected_traces_ttl_duration_hrs,omitempty"`
ExpectedTracesMoveTime int `json:"expected_traces_move_ttl_duration_hrs,omitempty"`
ExpectedLogsTime int `json:"expected_logs_ttl_duration_hrs,omitempty"`
ExpectedLogsMoveTime int `json:"expected_logs_move_ttl_duration_hrs,omitempty"`
Status string `json:"status"`
}

View File

@@ -1,83 +0,0 @@
package retentiontypes
import (
"encoding/json"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/types"
"github.com/stretchr/testify/require"
)
func TestBuildRetentionPolicySegmentsFromRows(t *testing.T) {
start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
end := start.AddDate(0, 0, 1)
ruleA := CustomRetentionRule{
Filters: []FilterCondition{{Key: "service.name", Values: []string{"api"}}},
TTLDays: 7,
}
ruleB := CustomRetentionRule{
Filters: []FilterCondition{{Key: "env", Values: []string{"prod"}}},
TTLDays: 15,
}
t.Run("row before window is active at start", func(t *testing.T) {
segments, err := BuildRetentionPolicySegmentsFromRows(
[]*TTLSetting{
ttlSetting(t, start.Add(-time.Hour), 45, []CustomRetentionRule{ruleA}),
},
30,
start.UnixMilli(),
end.UnixMilli(),
)
require.NoError(t, err)
require.Equal(t, []*RetentionPolicySegment{
NewRetentionPolicySegment(start.UnixMilli(), end.UnixMilli(), []CustomRetentionRule{ruleA}, 45),
}, segments)
})
t.Run("row inside window splits segments", func(t *testing.T) {
firstChange := start.Add(6 * time.Hour)
secondChange := start.Add(18 * time.Hour)
segments, err := BuildRetentionPolicySegmentsFromRows(
[]*TTLSetting{
ttlSetting(t, firstChange, 21, []CustomRetentionRule{ruleA}),
ttlSetting(t, secondChange, 14, []CustomRetentionRule{ruleB}),
},
30,
start.UnixMilli(),
end.UnixMilli(),
)
require.NoError(t, err)
require.Equal(t, []*RetentionPolicySegment{
NewRetentionPolicySegment(start.UnixMilli(), firstChange.UnixMilli(), nil, 30),
NewRetentionPolicySegment(firstChange.UnixMilli(), secondChange.UnixMilli(), []CustomRetentionRule{ruleA}, 21),
NewRetentionPolicySegment(secondChange.UnixMilli(), end.UnixMilli(), []CustomRetentionRule{ruleB}, 14),
}, segments)
})
t.Run("no rows uses fallback", func(t *testing.T) {
segments, err := BuildRetentionPolicySegmentsFromRows(nil, 30, start.UnixMilli(), end.UnixMilli())
require.NoError(t, err)
require.Equal(t, []*RetentionPolicySegment{
NewRetentionPolicySegment(start.UnixMilli(), end.UnixMilli(), nil, 30),
}, segments)
})
}
func ttlSetting(t *testing.T, createdAt time.Time, ttlDays int, rules []CustomRetentionRule) *TTLSetting {
t.Helper()
condition, err := json.Marshal(rules)
require.NoError(t, err)
return &TTLSetting{
TimeAuditable: types.TimeAuditable{
CreatedAt: createdAt,
},
TTL: ttlDays,
Condition: string(condition),
}
}

View File

@@ -1,20 +0,0 @@
package zeustypes
import "go.opentelemetry.io/otel/attribute"
var (
// Identifies the organization.
OrganizationID = attribute.Key("signoz.organization.id")
// Identifies the retention bucket a meter belongs to.
RetentionDuration = attribute.Key("signoz.retention.duration")
)
func NewDimensions(kvs ...attribute.KeyValue) map[string]string {
dimensions := map[string]string{}
for _, kv := range kvs {
dimensions[string(kv.Key)] = kv.Value.AsString()
}
return dimensions
}
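
A quick sketch of what this flattening produces, with a local mirror of the helper since the package symbol is removed in this diff (the org ID and retention value are made up):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// newDimensions mirrors the removed zeustypes.NewDimensions: it flattens
// otel key/value pairs into the string map carried on each meter.
func newDimensions(kvs ...attribute.KeyValue) map[string]string {
	dimensions := map[string]string{}
	for _, kv := range kvs {
		dimensions[string(kv.Key)] = kv.Value.AsString()
	}
	return dimensions
}

func main() {
	organizationID := attribute.Key("signoz.organization.id")
	retentionDuration := attribute.Key("signoz.retention.duration")

	// Both values are made up for illustration.
	fmt.Println(newDimensions(
		organizationID.String("org-123"),
		retentionDuration.String("15d"),
	)) // map[signoz.organization.id:org-123 signoz.retention.duration:15d]
}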

View File

@@ -1,29 +0,0 @@
package zeustypes
import (
"encoding/json"
)
type GettableDeployment struct {
ID string `json:"id"`
Name string `json:"name"`
Cluster struct {
ID string `json:"id"`
Name string `json:"name"`
Region struct {
ID string `json:"id"`
Name string `json:"name"`
DNS string `json:"dns"`
} `json:"region"`
} `json:"cluster"`
}
func NewGettableDeployment(data []byte) (*GettableDeployment, error) {
deployment := new(GettableDeployment)
err := json.Unmarshal(data, deployment)
if err != nil {
return nil, err
}
return deployment, nil
}

View File

@@ -1,46 +0,0 @@
package zeustypes
import (
"net/url"
"github.com/tidwall/gjson"
)
type Host struct {
Name string `json:"name" required:"true"`
IsDefault bool `json:"is_default" required:"true"`
URL string `json:"url" required:"true"`
}
type GettableHost struct {
Name string `json:"name" required:"true"`
State string `json:"state" required:"true"`
Tier string `json:"tier" required:"true"`
Hosts []Host `json:"hosts" required:"true"`
}
type PostableHost struct {
Name string `json:"name" required:"true"`
}
func NewGettableHost(data []byte) *GettableHost {
parsed := gjson.ParseBytes(data)
dns := parsed.Get("cluster.region.dns").String()
hostResults := parsed.Get("hosts").Array()
hosts := make([]Host, len(hostResults))
for i, h := range hostResults {
name := h.Get("name").String()
hosts[i].Name = name
hosts[i].IsDefault = h.Get("is_default").Bool()
hosts[i].URL = (&url.URL{Scheme: "https", Host: name + "." + dns}).String()
}
return &GettableHost{
Name: parsed.Get("name").String(),
State: parsed.Get("state").String(),
Tier: parsed.Get("tier").String(),
Hosts: hosts,
}
}

View File

@@ -1,54 +0,0 @@
package zeustypes
import "time"
type MeterCheckpoint struct {
Name string
StartDate time.Time
}
type Meter struct {
// MeterName is the fully-qualified meter identifier.
MeterName MeterName `json:"name"`
// Value is the aggregated integer scalar for this meter over the reporting window.
Value int64 `json:"value"`
// Unit is the metric unit for this meter.
Unit MeterUnit `json:"unit"`
// Aggregation names the aggregation applied to produce Value.
Aggregation MeterAggregation `json:"aggregation"`
// StartUnixMilli is the inclusive window start in epoch milliseconds.
StartUnixMilli int64 `json:"start_unix_milli"`
// EndUnixMilli is the exclusive window end in epoch milliseconds.
EndUnixMilli int64 `json:"end_unix_milli"`
// IsCompleted is false for the current day's partial value.
IsCompleted bool `json:"is_completed"`
// Dimensions is the per-meter label set.
Dimensions map[string]string `json:"dimensions"`
}
func NewMeter(
name MeterName,
value int64,
unit MeterUnit,
aggregation MeterAggregation,
window MeterWindow,
dimensions map[string]string,
) Meter {
return Meter{
MeterName: name,
Value: value,
Unit: unit,
Aggregation: aggregation,
StartUnixMilli: window.StartUnixMilli,
EndUnixMilli: window.EndUnixMilli,
IsCompleted: window.IsCompleted,
Dimensions: dimensions,
}
}
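
Taken together with the window, name, unit, and aggregation types in the files below, one sealed day's reading was assembled roughly like this sketch against the pre-removal package (the byte count and organization ID are made up):

package main

import (
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

func main() {
	// One sealed UTC day: [start, start+24h).
	start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
	window := zeustypes.MustNewMeterWindow(start.UnixMilli(), start.AddDate(0, 0, 1).UnixMilli(), true)

	meter := zeustypes.NewMeter(
		zeustypes.MeterLogSize,
		42_000_000, // bytes ingested that day; made-up value
		zeustypes.MeterUnitBytes,
		zeustypes.MeterAggregationSum,
		window,
		zeustypes.NewDimensions(zeustypes.OrganizationID.String("org-123")), // made-up org ID
	)
	fmt.Printf("%+v\n", meter)
}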

View File

@@ -1,12 +0,0 @@
package zeustypes
import "github.com/SigNoz/signoz/pkg/valuer"
type MeterAggregation struct {
valuer.String
}
var (
MeterAggregationSum = MeterAggregation{valuer.NewString("sum")}
MeterAggregationMax = MeterAggregation{valuer.NewString("max")}
)

View File

@@ -1,66 +0,0 @@
package zeustypes
import (
"encoding/json"
"regexp"
"github.com/SigNoz/signoz/pkg/errors"
)
var (
MeterSpanSize = MustNewMeterName("signoz.meter.span.size")
MeterLogSize = MustNewMeterName("signoz.meter.log.size")
MeterDatapointCount = MustNewMeterName("signoz.meter.metric.datapoint.count")
MeterPlatformActive = MustNewMeterName("signoz.meter.platform.active")
)
var meterNameRegex = regexp.MustCompile(`^[a-z][a-z0-9_.]+$`)
// MeterName is a validated dotted Zeus meter name.
type MeterName struct {
s string
}
func NewMeterName(s string) (MeterName, error) {
if !meterNameRegex.MatchString(s) {
return MeterName{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid meter name: %s", s)
}
return MeterName{s: s}, nil
}
func MustNewMeterName(s string) MeterName {
name, err := NewMeterName(s)
if err != nil {
panic(err)
}
return name
}
func (n MeterName) String() string {
return n.s
}
func (n MeterName) IsZero() bool {
return n.s == ""
}
func (n MeterName) MarshalJSON() ([]byte, error) {
return json.Marshal(n.s)
}
func (n *MeterName) UnmarshalJSON(data []byte) error {
var s string
if err := json.Unmarshal(data, &s); err != nil {
return err
}
name, err := NewMeterName(s)
if err != nil {
return err
}
*n = name
return nil
}
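
The regex is the whole naming contract: a lowercase letter first, then lowercase letters, digits, underscores, or dots. A minimal sketch of the accept/reject boundary (sample inputs are made up):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as meterNameRegex above.
var meterNameRegex = regexp.MustCompile(`^[a-z][a-z0-9_.]+$`)

func main() {
	// Accepted: lowercase, dotted.
	fmt.Println(meterNameRegex.MatchString("signoz.meter.log.size")) // true
	// Rejected: uppercase letters and hyphens fall outside the class.
	fmt.Println(meterNameRegex.MatchString("Signoz-Meter")) // false
}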

View File

@@ -1,30 +0,0 @@
package zeustypes
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestNewMeterWindow(t *testing.T) {
start := time.Date(2026, 5, 4, 0, 0, 0, 0, time.UTC)
window, err := NewMeterWindow(start.UnixMilli(), start.AddDate(0, 0, 1).UnixMilli(), true)
require.NoError(t, err)
require.Equal(t, start.UnixMilli(), window.StartUnixMilli)
require.Equal(t, start.AddDate(0, 0, 1).UnixMilli(), window.EndUnixMilli)
require.True(t, window.IsCompleted)
_, err = NewMeterWindow(0, start.UnixMilli(), true)
require.Error(t, err)
_, err = NewMeterWindow(start.UnixMilli(), start.UnixMilli(), false)
require.Error(t, err)
}
func TestMustNewMeterWindowPanicsForInvalidWindow(t *testing.T) {
require.Panics(t, func() {
MustNewMeterWindow(0, 0, true)
})
}

View File

@@ -1,12 +0,0 @@
package zeustypes
import "github.com/SigNoz/signoz/pkg/valuer"
type MeterUnit struct {
valuer.String
}
var (
MeterUnitCount = MeterUnit{valuer.NewString("count")}
MeterUnitBytes = MeterUnit{valuer.NewString("bytes")}
)

View File

@@ -1,43 +0,0 @@
package zeustypes
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
)
type MeterWindow struct {
StartUnixMilli int64
EndUnixMilli int64
IsCompleted bool
}
func NewMeterWindow(startUnixMilli, endUnixMilli int64, isCompleted bool) (MeterWindow, error) {
if startUnixMilli <= 0 {
return MeterWindow{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "meter window start must be positive: %d", startUnixMilli)
}
if endUnixMilli <= startUnixMilli {
return MeterWindow{}, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "meter window end must be after start: [%d, %d)", startUnixMilli, endUnixMilli)
}
return MeterWindow{
StartUnixMilli: startUnixMilli,
EndUnixMilli: endUnixMilli,
IsCompleted: isCompleted,
}, nil
}
func MustNewMeterWindow(startUnixMilli, endUnixMilli int64, isCompleted bool) MeterWindow {
window, err := NewMeterWindow(startUnixMilli, endUnixMilli, isCompleted)
if err != nil {
panic(err)
}
return window
}
func (w MeterWindow) Day() time.Time {
t := time.UnixMilli(w.StartUnixMilli).UTC()
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
}
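
Day() keys a window to the UTC midnight of its start. A self-contained sketch mirroring the method above (the timestamp is made up):

package main

import (
	"fmt"
	"time"
)

// day mirrors MeterWindow.Day above: the UTC midnight of the window start.
func day(startUnixMilli int64) time.Time {
	t := time.UnixMilli(startUnixMilli).UTC()
	return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
}

func main() {
	// A window starting mid-day still keys to that day's UTC midnight.
	start := time.Date(2026, 5, 4, 13, 45, 0, 0, time.UTC)
	fmt.Println(day(start.UnixMilli())) // 2026-05-04 00:00:00 +0000 UTC
}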

View File

@@ -1,13 +0,0 @@
package zeustypes
type PostableProfile struct {
UsesOtel bool `json:"uses_otel" required:"true"`
HasExistingObservabilityTool bool `json:"has_existing_observability_tool" required:"true"`
ExistingObservabilityTool string `json:"existing_observability_tool" required:"true"`
ReasonsForInterestInSigNoz []string `json:"reasons_for_interest_in_signoz" required:"true"`
LogsScalePerDayInGB int64 `json:"logs_scale_per_day_in_gb" required:"true"`
NumberOfServices int64 `json:"number_of_services" required:"true"`
NumberOfHosts int64 `json:"number_of_hosts" required:"true"`
WhereDidYouDiscoverSigNoz string `json:"where_did_you_discover_signoz" required:"true"`
TimelineForMigratingToSigNoz string `json:"timeline_for_migrating_to_signoz" required:"true"`
}

View File

@@ -0,0 +1,87 @@
package zeustypes
import (
"encoding/json"
"net/url"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/tidwall/gjson"
)
type PostableHost struct {
Name string `json:"name" required:"true"`
}
type PostableProfile struct {
UsesOtel bool `json:"uses_otel" required:"true"`
HasExistingObservabilityTool bool `json:"has_existing_observability_tool" required:"true"`
ExistingObservabilityTool string `json:"existing_observability_tool" required:"true"`
ReasonsForInterestInSigNoz []string `json:"reasons_for_interest_in_signoz" required:"true"`
LogsScalePerDayInGB int64 `json:"logs_scale_per_day_in_gb" required:"true"`
NumberOfServices int64 `json:"number_of_services" required:"true"`
NumberOfHosts int64 `json:"number_of_hosts" required:"true"`
WhereDidYouDiscoverSigNoz string `json:"where_did_you_discover_signoz" required:"true"`
TimelineForMigratingToSigNoz string `json:"timeline_for_migrating_to_signoz" required:"true"`
}
type GettableHost struct {
Name string `json:"name" required:"true"`
State string `json:"state" required:"true"`
Tier string `json:"tier" required:"true"`
Hosts []Host `json:"hosts" required:"true"`
}
type Host struct {
Name string `json:"name" required:"true"`
IsDefault bool `json:"is_default" required:"true"`
URL string `json:"url" required:"true"`
}
func NewGettableHost(data []byte) *GettableHost {
parsed := gjson.ParseBytes(data)
dns := parsed.Get("cluster.region.dns").String()
hostResults := parsed.Get("hosts").Array()
hosts := make([]Host, len(hostResults))
for i, h := range hostResults {
name := h.Get("name").String()
hosts[i].Name = name
hosts[i].IsDefault = h.Get("is_default").Bool()
hosts[i].URL = (&url.URL{Scheme: "https", Host: name + "." + dns}).String()
}
return &GettableHost{
Name: parsed.Get("name").String(),
State: parsed.Get("state").String(),
Tier: parsed.Get("tier").String(),
Hosts: hosts,
}
}
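
NewGettableHost never fails: gjson tolerates missing fields, and each host URL is derived as https://<host name>.<region dns>. A minimal sketch with a hypothetical payload:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

func main() {
	// Hypothetical Zeus payload; only the fields read by NewGettableHost matter.
	payload := []byte(`{
		"name": "acme",
		"state": "active",
		"tier": "paid",
		"cluster": {"region": {"dns": "us.signoz.cloud"}},
		"hosts": [{"name": "acme", "is_default": true}]
	}`)

	host := zeustypes.NewGettableHost(payload)
	fmt.Println(host.Hosts[0].URL) // https://acme.us.signoz.cloud
}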
// GettableDeployment represents the parsed deployment info from zeus.GetDeployment.
// NOTE: this is not the full response structure; add more fields from the actual response as needed.
type GettableDeployment struct {
ID string `json:"id"`
Name string `json:"name"`
Cluster struct {
ID string `json:"id"`
Name string `json:"name"`
Region struct {
ID string `json:"id"`
Name string `json:"name"`
DNS string `json:"dns"`
} `json:"region"`
} `json:"cluster"`
}
// NewGettableDeployment parses raw GetDeployment bytes into a GettableDeployment.
func NewGettableDeployment(data []byte) (*GettableDeployment, error) {
deployment := new(GettableDeployment)
err := json.Unmarshal(data, deployment)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to unmarshal deployment response")
}
return deployment, nil
}
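
NewGettableDeployment is the stricter counterpart: a full json.Unmarshal whose failure is wrapped as an internal error. A sketch with a hypothetical payload:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/zeustypes"
)

func main() {
	// Hypothetical GetDeployment response; unknown fields are ignored.
	payload := []byte(`{"id": "dep-1", "name": "acme", "cluster": {"region": {"dns": "us.signoz.cloud"}}}`)

	deployment, err := zeustypes.NewGettableDeployment(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(deployment.Name, deployment.Cluster.Region.DNS) // acme us.signoz.cloud
}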

View File

@@ -49,14 +49,6 @@ func (provider *provider) PutMetersV2(_ context.Context, _ string, _ []byte) err
return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting meters v2 is not supported")
}
func (provider *provider) PutMetersV3(_ context.Context, _ string, _ string, _ []byte) error {
return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting meters v3 is not supported")
}
func (provider *provider) ListMeterCheckpoints(_ context.Context, _ string) ([]zeustypes.MeterCheckpoint, error) {
return nil, errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "list meter checkpoints is not supported")
}
func (provider *provider) PutProfile(_ context.Context, _ string, _ *zeustypes.PostableProfile) error {
return errors.New(errors.TypeUnsupported, zeus.ErrCodeUnsupported, "putting profile is not supported")
}

View File

@@ -35,16 +35,6 @@ type Zeus interface {
// Puts the meters for the given license key using Zeus.
PutMetersV2(context.Context, string, []byte) error
// PutMetersV3 ships one day's raw JSON array of meter readings to the
// v2/meters endpoint. idempotencyKey is propagated as X-Idempotency-Key so
// Zeus can UPSERT on retries.
PutMetersV3(ctx context.Context, licenseKey string, idempotencyKey string, body []byte) error
// ListMeterCheckpoints returns the latest sealed (is_completed=true) UTC day
// Zeus has stored for each billing meter name. Missing meter names are
// treated by the cron as bootstrap cases.
ListMeterCheckpoints(ctx context.Context, licenseKey string) ([]zeustypes.MeterCheckpoint, error)
// Put profile for the given license key.
PutProfile(context.Context, string, *zeustypes.PostableProfile) error
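
The removed methods encoded a resume protocol: read the last sealed day per meter, then ship subsequent days under an idempotency key so Zeus can UPSERT on retries. A minimal sketch of the resume decision only, with a local mirror of the removed checkpoint type and a made-up 30-day bootstrap lookback:

package main

import (
	"fmt"
	"time"
)

// MeterCheckpoint mirrors the removed zeustypes.MeterCheckpoint.
type MeterCheckpoint struct {
	Name      string
	StartDate time.Time
}

// nextStart picks the first day to report: the day after the sealed
// checkpoint when one exists, or a hypothetical 30-day bootstrap
// lookback when Zeus has never stored the meter.
func nextStart(checkpoints []MeterCheckpoint, meter string, now time.Time) time.Time {
	for _, cp := range checkpoints {
		if cp.Name == meter {
			return cp.StartDate.AddDate(0, 0, 1)
		}
	}
	// Bootstrap case; the 30-day window is made up for illustration.
	return now.UTC().AddDate(0, 0, -30).Truncate(24 * time.Hour)
}

func main() {
	checkpoints := []MeterCheckpoint{
		{Name: "signoz.meter.log.size", StartDate: time.Date(2026, 5, 3, 0, 0, 0, 0, time.UTC)},
	}
	now := time.Date(2026, 5, 5, 10, 0, 0, 0, time.UTC)
	fmt.Println(nextStart(checkpoints, "signoz.meter.log.size", now))  // resumes at 2026-05-04
	fmt.Println(nextStart(checkpoints, "signoz.meter.span.size", now)) // bootstrap: 2026-04-05
}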

View File

@@ -6,7 +6,7 @@ import uuid
from abc import ABC
from collections.abc import Callable, Generator
from enum import Enum
from typing import Any
from typing import Any, Literal
from urllib.parse import urlparse
import numpy as np
@@ -236,6 +236,7 @@ class Traces(ABC):
attributes_number: dict[str, np.float64]
attributes_bool: dict[str, bool]
resources_string: dict[str, str]
resource_json: dict[str, str]
events: list[str]
links: str
response_status_code: str
@@ -273,6 +274,7 @@ class Traces(ABC):
links: list[TracesLink] = [],
trace_state: str = "",
flags: np.uint32 = 0,
resource_write_mode: Literal["legacy_only", "dual_write"] = "dual_write",
) -> None:
if timestamp is None:
timestamp = datetime.datetime.now()
@@ -322,8 +324,11 @@ class Traces(ABC):
self.db_name = ""
self.db_operation = ""
# Process resources and derive service_name
# Process resources and derive service_name. Spans written before the
# JSON-resource evolution time only populate resources_string (legacy_only);
# spans at or after the evolution time dual-write to both columns.
self.resources_string = {k: str(v) for k, v in resources.items()}
self.resource_json = {} if resource_write_mode == "legacy_only" else dict(self.resources_string)
self.service_name = self.resources_string.get("service.name", "default-service")
for k, v in self.resources_string.items():
@@ -575,7 +580,7 @@ class Traces(ABC):
self.db_operation,
self.has_error,
self.is_remote,
self.resources_string,
self.resource_json,
],
dtype=object,
)

View File

@@ -0,0 +1,240 @@
from collections.abc import Callable
from datetime import UTC, datetime, timedelta
from http import HTTPStatus
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.querier import (
build_group_by_field,
build_logs_aggregation,
index_series_by_label,
make_query_request,
)
from fixtures.traces import TraceIdGenerator, Traces
# The evolution for the resource column is already created during schema migration.
# Since we have to create test data around it, we first fetch the evolution time.
def _get_traces_resource_evolution_time_json(signoz: types.SigNoz) -> datetime:
result = signoz.telemetrystore.conn.query(
"""
SELECT release_time
FROM signoz_metadata.distributed_column_evolution_metadata
WHERE signal = 'traces'
AND field_context = 'resource'
AND field_name = '__all__'
AND column_name = 'resource'
LIMIT 1
"""
).result_rows
assert result, "Expected traces resource evolution metadata to exist"
release_time_ns = int(result[0][0])
return datetime.fromtimestamp(release_time_ns / 1e9, tz=UTC)
# Spans with timestamps before the evolution time will have resources written only to resources_string.
# Spans with timestamps at or after the evolution time will have resources written to both resources_string and resource (JSON).
def _build_evolved_span(
timestamp: datetime,
evolution_time: datetime,
service_name: str,
name: str,
) -> Traces:
resource_write_mode = "legacy_only" if timestamp < evolution_time else "dual_write"
return Traces(
timestamp=timestamp,
trace_id=TraceIdGenerator.trace_id(),
span_id=TraceIdGenerator.span_id(),
name=name,
resources={
"service.name": service_name,
"deployment.environment": "integration",
},
resource_write_mode=resource_write_mode,
)
def _query_grouped_trace_series(
signoz: types.SigNoz,
token: str,
start: datetime,
end: datetime,
group_by: str = "service.name",
aggregation: str = "count()",
) -> dict[str, dict]:
response = make_query_request(
signoz,
token,
start_ms=int(start.timestamp() * 1000),
end_ms=int(end.timestamp() * 1000),
request_type="time_series",
queries=[
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"stepInterval": 60,
"disabled": False,
"groupBy": [build_group_by_field(group_by)],
"having": {"expression": ""},
"aggregations": [build_logs_aggregation(aggregation)],
},
}
],
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
results = response.json()["data"]["data"]["results"]
assert len(results) == 1
aggregations = results[0]["aggregations"]
assert len(aggregations) == 1
return index_series_by_label(aggregations[0]["series"], group_by)
def _assert_grouped_series(
series_by_group: dict[str, dict],
expected_values_by_group: dict[str, dict[int, int]],
) -> None:
assert set(series_by_group.keys()) == set(expected_values_by_group.keys())
for group_name, expected_by_ts in expected_values_by_group.items():
actual_values = sorted(
series_by_group[group_name]["values"],
key=lambda value: value["timestamp"],
)
expected_values = [{"timestamp": timestamp, "value": value} for timestamp, value in sorted(expected_by_ts.items())]
assert actual_values == expected_values
def _test_traces_resource_evolution(
signoz: types.SigNoz,
token: str,
insert_traces: Callable[[list[Traces]], None],
) -> None:
"""
# 1. Get the evolution time.
# 2. Ingest spans before the evolution time.
# 3. Ingest spans after the evolution time.
# 4. Query the spans before the evolution time.
# 5. Query the spans after the evolution time.
# Both aggregation and group by should be checked.
"""
evolution_time = _get_traces_resource_evolution_time_json(signoz)
evolution_time = evolution_time.replace(second=0, microsecond=0)
before_2 = evolution_time - timedelta(minutes=10)
before_1 = evolution_time - timedelta(minutes=5)
after_1 = evolution_time + timedelta(minutes=5)
after_2 = evolution_time + timedelta(minutes=10)
insert_traces(
[
_build_evolved_span(
timestamp=before_2,
evolution_time=evolution_time,
service_name="svc-before-2",
name="span before evolution 2",
),
_build_evolved_span(
timestamp=before_1,
evolution_time=evolution_time,
service_name="svc-before-1",
name="span before evolution 1",
),
_build_evolved_span(
timestamp=after_1,
evolution_time=evolution_time,
service_name="svc-after-1",
name="span after evolution 1",
),
_build_evolved_span(
timestamp=after_2,
evolution_time=evolution_time,
service_name="svc-after-2",
name="span after evolution 2",
),
]
)
before_series = _query_grouped_trace_series(signoz, token, before_2 - timedelta(minutes=1), before_1 + timedelta(minutes=1))
_assert_grouped_series(
before_series,
expected_values_by_group={
"svc-before-2": {
int(before_2.timestamp() * 1000): 1,
},
"svc-before-1": {
int(before_1.timestamp() * 1000): 1,
},
},
)
after_series = _query_grouped_trace_series(signoz, token, after_1 - timedelta(minutes=1), after_2 + timedelta(minutes=1))
_assert_grouped_series(
after_series,
expected_values_by_group={
"svc-after-1": {
int(after_1.timestamp() * 1000): 1,
},
"svc-after-2": {
int(after_2.timestamp() * 1000): 1,
},
},
)
spanning_series = _query_grouped_trace_series(signoz, token, before_2, after_2 + timedelta(minutes=1))
_assert_grouped_series(
spanning_series,
expected_values_by_group={
"svc-before-2": {
int(before_2.timestamp() * 1000): 1,
},
"svc-before-1": {
int(before_1.timestamp() * 1000): 1,
},
"svc-after-1": {
int(after_1.timestamp() * 1000): 1,
},
"svc-after-2": {
int(after_2.timestamp() * 1000): 1,
},
},
)
# Check aggregation on a resource field, e.g. count_distinct(service.name)
aggregation_series = _query_grouped_trace_series(
signoz,
token,
before_2,
after_2 + timedelta(minutes=1),
group_by="deployment.environment",
aggregation="count_distinct(service.name)",
)
_assert_grouped_series(
aggregation_series,
expected_values_by_group={
"integration": {
int(before_2.timestamp() * 1000): 1,
int(before_1.timestamp() * 1000): 1,
int(after_1.timestamp() * 1000): 1,
int(after_2.timestamp() * 1000): 1,
},
},
)
def test_traces_resource_evolution(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[list[Traces]], None],
) -> None:
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
_test_traces_resource_evolution(signoz, token, insert_traces)