Compare commits

...

65 Commits

Author SHA1 Message Date
Srikanth Chekuri
e1d73862c7 Merge branch 'main' into issue_3017 2026-02-25 10:47:11 +05:30
Nageshbansal
4e4c9ce5af chore: enable metadataexporter in docker (#10409)
Some checks failed
build-staging / prepare (push) Has been cancelled
build-staging / js-build (push) Has been cancelled
build-staging / go-build (push) Has been cancelled
build-staging / staging (push) Has been cancelled
Release Drafter / update_release_draft (push) Has been cancelled
2026-02-25 03:13:27 +05:30
Srikanth Chekuri
7605775a38 chore: remove support for non v5 version in rules (#10406) 2026-02-24 23:16:21 +05:30
nityanandagohain
108f03c7bd Merge remote-tracking branch 'origin/main' into issue_3017 2026-02-19 16:59:20 +05:30
nityanandagohain
ec5738032d fix: copy slice 2026-02-05 16:10:16 +05:30
nityanandagohain
4352c4de91 fix: final cleanup 2026-02-05 16:02:12 +05:30
nityanandagohain
6acbc7156d fix: add support for version in the evolutions 2026-02-05 15:24:55 +05:30
nityanandagohain
09f9a2d4f2 Merge remote-tracking branch 'origin/main' into issue_3017 2026-02-05 13:47:45 +05:30
nityanandagohain
6a5354df39 Merge remote-tracking branch 'origin/main' into issue_3017 2026-02-04 16:26:14 +05:30
nityanandagohain
ca9ff25314 fix: more fixes 2026-02-04 16:22:18 +05:30
nityanandagohain
07e66e8c24 Merge remote-tracking branch 'origin/main' into issue_3017 2026-02-04 11:25:24 +05:30
nityanandagohain
6283c6c26a fix: evolution metadata 2026-02-04 11:25:10 +05:30
nityanandagohain
3515e59a39 fix: minor refactoring and addressing comments 2026-02-03 10:48:51 +05:30
nityanandagohain
7756067914 fix: revert commented test 2026-02-03 10:17:13 +05:30
Nityananda Gohain
d3ef59cba7 Merge branch 'main' into issue_3017 2026-02-03 10:14:07 +05:30
nityanandagohain
81e33d59bb fix: address comments 2026-02-03 10:13:41 +05:30
nityanandagohain
a05957dc69 Merge remote-tracking branch 'origin/main' into issue_3017 2026-02-01 10:59:54 +05:30
nityanandagohain
24cf357b04 Merge remote-tracking branch 'origin/issue_3017' into issue_3017 2026-02-01 10:14:08 +05:30
nityanandagohain
91e4da28e6 fix: update conditionfor 2026-02-01 10:10:45 +05:30
Srikanth Chekuri
4cc727b7f8 Merge branch 'main' into issue_3017 2026-01-14 23:22:57 +05:30
nityanandagohain
9b24097a61 fix: edgecases 2026-01-14 00:20:33 +05:30
nityanandagohain
3a5d6b4493 fix: update query in adjust keys 2026-01-13 23:22:25 +05:30
nityanandagohain
d341f1f810 fix: minor cleanup 2026-01-13 21:52:21 +05:30
nityanandagohain
df1b47230a fix: add api 2026-01-13 01:48:21 +05:30
Nityananda Gohain
6261c9586f Merge branch 'main' into issue_3017 2026-01-13 01:47:01 +05:30
nityanandagohain
cda48874d2 fix: structural changes 2026-01-13 01:46:17 +05:30
nityanandagohain
277b6de266 fix: more changes 2026-01-12 19:43:09 +05:30
nityanandagohain
6f87ebe092 fix: fetch keys at the start 2026-01-12 18:00:29 +05:30
nityanandagohain
62c70715e0 Merge remote-tracking branch 'origin/main' into issue_3017 2026-01-12 16:27:52 +05:30
nityanandagohain
585a2b5282 fix: clean up evolution logic 2026-01-12 16:25:59 +05:30
nityanandagohain
6ad4c8ad8e fix: more changes 2026-01-09 23:34:44 +05:30
nityanandagohain
68df57965d fix: refactor code 2026-01-09 19:27:09 +05:30
nityanandagohain
d155cc6a10 fix: changes 2026-01-08 23:26:52 +05:30
Srikanth Chekuri
90a6902093 Merge branch 'main' into issue_3017 2026-01-08 18:43:57 +05:30
nityanandagohain
2bf92c9c2f fix: tests 2026-01-08 13:06:48 +05:30
nityanandagohain
aa2c1676b6 fix: tests 2026-01-08 12:59:13 +05:30
nityanandagohain
239c0f4e2e fix: merge conflicts 2026-01-08 12:51:58 +05:30
Srikanth Chekuri
97ecfdea23 Merge branch 'main' into issue_3017 2026-01-03 00:40:03 +05:30
nityanandagohain
6a02db8685 fix: address cursor comments 2026-01-02 15:53:41 +05:30
nityanandagohain
9f85dfb307 Merge remote-tracking branch 'origin/issue_3017' into issue_3017 2026-01-02 15:46:34 +05:30
nityanandagohain
ebc236857d fix: lint issues 2026-01-02 15:46:12 +05:30
Nityananda Gohain
0a1e252bb5 Merge branch 'main' into issue_3017 2026-01-02 15:34:05 +05:30
nityanandagohain
dd696bab13 fix: update the evolution metadata table 2026-01-02 15:32:05 +05:30
nityanandagohain
7f87103b30 fix: tests 2025-12-30 17:54:39 +05:30
nityanandagohain
726bd0ea7a Merge remote-tracking branch 'origin/main' into issue_3017 2025-12-30 15:34:37 +05:30
nityanandagohain
ab443c2d65 fix: make the evolution code reusable and propagte context properly 2025-12-30 15:33:58 +05:30
nityanandagohain
8be9a79d56 fix: use name from evolutions 2025-12-26 23:33:34 +05:30
nityanandagohain
471ad88971 fix: address changes and add tests 2025-12-26 23:25:43 +05:30
nityanandagohain
a5c46beeec Merge remote-tracking branch 'origin/main' into issue_3017 2025-12-23 18:21:40 +05:30
nityanandagohain
41f720950d fix: minor changes 2025-12-09 10:46:09 +07:00
nityanandagohain
d9bce4a3c6 fix: lint issues 2025-12-08 22:06:38 +07:00
nityanandagohain
a5ac40c33c fix: test 2025-12-08 21:40:30 +07:00
nityanandagohain
86b1366d4a fix: comments 2025-12-08 21:31:26 +07:00
nityanandagohain
eddb43a901 fix: aggregation 2025-12-08 21:30:07 +07:00
nityanandagohain
505cfe2314 fix: use orgId properly 2025-12-08 20:57:17 +07:00
nityanandagohain
6e54ee822a fix: use proper cache 2025-12-08 20:46:56 +07:00
nityanandagohain
d88cb8aba4 fix: minor changes 2025-12-08 20:18:34 +07:00
nityanandagohain
b823b2a1e1 fix: minor changes 2025-12-08 20:14:01 +07:00
nityanandagohain
7cfb7118a3 fix: update tests 2025-12-08 19:54:01 +07:00
nityanandagohain
59dfe7c0ed fix: remove goroutine 2025-12-08 19:11:22 +07:00
nityanandagohain
96b68b91c9 fix: update comment 2025-12-06 08:29:25 +05:30
nityanandagohain
be6ce8d4f1 fix: update fetch code 2025-12-06 08:27:53 +05:30
nityanandagohain
1fc58695c6 fix: tests 2025-12-06 06:47:29 +05:30
nityanandagohain
43450a187e Merge remote-tracking branch 'origin/main' into issue_3017 2025-12-06 05:15:26 +05:30
nityanandagohain
f4666d9c97 feat: time aware dynamic field mapper 2025-11-25 09:59:12 +05:30
55 changed files with 2326 additions and 389 deletions

View File

@@ -82,6 +82,12 @@ exporters:
timeout: 45s timeout: 45s
sending_queue: sending_queue:
enabled: false enabled: false
metadataexporter:
cache:
provider: in_memory
dsn: tcp://clickhouse:9000/signoz_metadata
enabled: true
timeout: 45s
service: service:
telemetry: telemetry:
logs: logs:
@@ -93,19 +99,19 @@ service:
traces: traces:
receivers: [otlp] receivers: [otlp]
processors: [signozspanmetrics/delta, batch] processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces, signozmeter] exporters: [clickhousetraces, metadataexporter, signozmeter]
metrics: metrics:
receivers: [otlp] receivers: [otlp]
processors: [batch] processors: [batch]
exporters: [signozclickhousemetrics, signozmeter] exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
metrics/prometheus: metrics/prometheus:
receivers: [prometheus] receivers: [prometheus]
processors: [batch] processors: [batch]
exporters: [signozclickhousemetrics, signozmeter] exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
logs: logs:
receivers: [otlp] receivers: [otlp]
processors: [batch] processors: [batch]
exporters: [clickhouselogsexporter, signozmeter] exporters: [clickhouselogsexporter, metadataexporter, signozmeter]
metrics/meter: metrics/meter:
receivers: [signozmeter] receivers: [signozmeter]
processors: [batch/meter] processors: [batch/meter]

View File

@@ -82,6 +82,12 @@ exporters:
timeout: 45s timeout: 45s
sending_queue: sending_queue:
enabled: false enabled: false
metadataexporter:
cache:
provider: in_memory
dsn: tcp://clickhouse:9000/signoz_metadata
enabled: true
timeout: 45s
service: service:
telemetry: telemetry:
logs: logs:
@@ -93,19 +99,19 @@ service:
traces: traces:
receivers: [otlp] receivers: [otlp]
processors: [signozspanmetrics/delta, batch] processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces, signozmeter] exporters: [clickhousetraces, metadataexporter, signozmeter]
metrics: metrics:
receivers: [otlp] receivers: [otlp]
processors: [batch] processors: [batch]
exporters: [signozclickhousemetrics, signozmeter] exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
metrics/prometheus: metrics/prometheus:
receivers: [prometheus] receivers: [prometheus]
processors: [batch] processors: [batch]
exporters: [signozclickhousemetrics, signozmeter] exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
logs: logs:
receivers: [otlp] receivers: [otlp]
processors: [batch] processors: [batch]
exporters: [clickhouselogsexporter, signozmeter] exporters: [clickhouselogsexporter, metadataexporter, signozmeter]
metrics/meter: metrics/meter:
receivers: [signozmeter] receivers: [signozmeter]
processors: [batch/meter] processors: [batch/meter]

View File

@@ -712,18 +712,18 @@ func (m *module) buildFilterClause(ctx context.Context, filter *qbtypes.Filter,
} }
opts := querybuilder.FilterExprVisitorOpts{ opts := querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: m.logger, Logger: m.logger,
FieldMapper: m.fieldMapper, FieldMapper: m.fieldMapper,
ConditionBuilder: m.condBuilder, ConditionBuilder: m.condBuilder,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{ FullTextColumn: &telemetrytypes.TelemetryFieldKey{
Name: "labels"}, Name: "labels"},
FieldKeys: keys, FieldKeys: keys,
StartNs: querybuilder.ToNanoSecs(uint64(startMillis)),
EndNs: querybuilder.ToNanoSecs(uint64(endMillis)),
} }
startNs := querybuilder.ToNanoSecs(uint64(startMillis)) whereClause, err := querybuilder.PrepareWhereClause(expression, opts)
endNs := querybuilder.ToNanoSecs(uint64(endMillis))
whereClause, err := querybuilder.PrepareWhereClause(expression, opts, startNs, endNs)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -66,6 +66,7 @@ func newProvider(
telemetrylogs.LogResourceKeysTblName, telemetrylogs.LogResourceKeysTblName,
telemetrymetadata.DBName, telemetrymetadata.DBName,
telemetrymetadata.AttributesMetadataLocalTableName, telemetrymetadata.AttributesMetadataLocalTableName,
telemetrymetadata.ColumnEvolutionMetadataTableName,
) )
// Create trace statement builder // Create trace statement builder

View File

@@ -48,6 +48,8 @@ func NewAggExprRewriter(
// and the args if the parametric aggregation function is used. // and the args if the parametric aggregation function is used.
func (r *aggExprRewriter) Rewrite( func (r *aggExprRewriter) Rewrite(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
expr string, expr string,
rateInterval uint64, rateInterval uint64,
keys map[string][]*telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey,
@@ -74,7 +76,12 @@ func (r *aggExprRewriter) Rewrite(
return "", nil, errors.NewInternalf(errors.CodeInternal, "no SELECT items for %q", expr) return "", nil, errors.NewInternalf(errors.CodeInternal, "no SELECT items for %q", expr)
} }
visitor := newExprVisitor(r.logger, keys, visitor := newExprVisitor(
ctx,
startNs,
endNs,
r.logger,
keys,
r.fullTextColumn, r.fullTextColumn,
r.fieldMapper, r.fieldMapper,
r.conditionBuilder, r.conditionBuilder,
@@ -94,6 +101,8 @@ func (r *aggExprRewriter) Rewrite(
// RewriteMulti rewrites a slice of expressions. // RewriteMulti rewrites a slice of expressions.
func (r *aggExprRewriter) RewriteMulti( func (r *aggExprRewriter) RewriteMulti(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
exprs []string, exprs []string,
rateInterval uint64, rateInterval uint64,
keys map[string][]*telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey,
@@ -102,7 +111,7 @@ func (r *aggExprRewriter) RewriteMulti(
var errs []error var errs []error
var chArgsList [][]any var chArgsList [][]any
for i, e := range exprs { for i, e := range exprs {
w, chArgs, err := r.Rewrite(ctx, e, rateInterval, keys) w, chArgs, err := r.Rewrite(ctx, startNs, endNs, e, rateInterval, keys)
if err != nil { if err != nil {
errs = append(errs, err) errs = append(errs, err)
out[i] = e out[i] = e
@@ -119,6 +128,9 @@ func (r *aggExprRewriter) RewriteMulti(
// exprVisitor walks FunctionExpr nodes and applies the mappers. // exprVisitor walks FunctionExpr nodes and applies the mappers.
type exprVisitor struct { type exprVisitor struct {
ctx context.Context
startNs uint64
endNs uint64
chparser.DefaultASTVisitor chparser.DefaultASTVisitor
logger *slog.Logger logger *slog.Logger
fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey
@@ -132,6 +144,9 @@ type exprVisitor struct {
} }
func newExprVisitor( func newExprVisitor(
ctx context.Context,
startNs uint64,
endNs uint64,
logger *slog.Logger, logger *slog.Logger,
fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey, fieldKeys map[string][]*telemetrytypes.TelemetryFieldKey,
fullTextColumn *telemetrytypes.TelemetryFieldKey, fullTextColumn *telemetrytypes.TelemetryFieldKey,
@@ -140,6 +155,9 @@ func newExprVisitor(
jsonKeyToKey qbtypes.JsonKeyToFieldFunc, jsonKeyToKey qbtypes.JsonKeyToFieldFunc,
) *exprVisitor { ) *exprVisitor {
return &exprVisitor{ return &exprVisitor{
ctx: ctx,
startNs: startNs,
endNs: endNs,
logger: logger, logger: logger,
fieldKeys: fieldKeys, fieldKeys: fieldKeys,
fullTextColumn: fullTextColumn, fullTextColumn: fullTextColumn,
@@ -186,13 +204,16 @@ func (v *exprVisitor) VisitFunctionExpr(fn *chparser.FunctionExpr) error {
whereClause, err := PrepareWhereClause( whereClause, err := PrepareWhereClause(
origPred, origPred,
FilterExprVisitorOpts{ FilterExprVisitorOpts{
Context: v.ctx,
Logger: v.logger, Logger: v.logger,
FieldKeys: v.fieldKeys, FieldKeys: v.fieldKeys,
FieldMapper: v.fieldMapper, FieldMapper: v.fieldMapper,
ConditionBuilder: v.conditionBuilder, ConditionBuilder: v.conditionBuilder,
FullTextColumn: v.fullTextColumn, FullTextColumn: v.fullTextColumn,
JsonKeyToKey: v.jsonKeyToKey, JsonKeyToKey: v.jsonKeyToKey,
}, 0, 0, StartNs: v.startNs,
EndNs: v.endNs,
},
) )
if err != nil { if err != nil {
return err return err
@@ -212,7 +233,7 @@ func (v *exprVisitor) VisitFunctionExpr(fn *chparser.FunctionExpr) error {
for i := 0; i < len(args)-1; i++ { for i := 0; i < len(args)-1; i++ {
origVal := args[i].String() origVal := args[i].String()
fieldKey := telemetrytypes.GetFieldKeyFromKeyText(origVal) fieldKey := telemetrytypes.GetFieldKeyFromKeyText(origVal)
expr, exprArgs, err := CollisionHandledFinalExpr(context.Background(), &fieldKey, v.fieldMapper, v.conditionBuilder, v.fieldKeys, dataType, v.jsonKeyToKey) expr, exprArgs, err := CollisionHandledFinalExpr(v.ctx, v.startNs, v.endNs, &fieldKey, v.fieldMapper, v.conditionBuilder, v.fieldKeys, dataType, v.jsonKeyToKey)
if err != nil { if err != nil {
return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed to get table field name for %q", origVal) return errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed to get table field name for %q", origVal)
} }
@@ -230,7 +251,7 @@ func (v *exprVisitor) VisitFunctionExpr(fn *chparser.FunctionExpr) error {
for i, arg := range args { for i, arg := range args {
orig := arg.String() orig := arg.String()
fieldKey := telemetrytypes.GetFieldKeyFromKeyText(orig) fieldKey := telemetrytypes.GetFieldKeyFromKeyText(orig)
expr, exprArgs, err := CollisionHandledFinalExpr(context.Background(), &fieldKey, v.fieldMapper, v.conditionBuilder, v.fieldKeys, dataType, v.jsonKeyToKey) expr, exprArgs, err := CollisionHandledFinalExpr(v.ctx, v.startNs, v.endNs, &fieldKey, v.fieldMapper, v.conditionBuilder, v.fieldKeys, dataType, v.jsonKeyToKey)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -153,6 +153,7 @@ func AdjustKey(key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemet
key.Indexes = intrinsicOrCalculatedField.Indexes key.Indexes = intrinsicOrCalculatedField.Indexes
key.Materialized = intrinsicOrCalculatedField.Materialized key.Materialized = intrinsicOrCalculatedField.Materialized
key.JSONPlan = intrinsicOrCalculatedField.JSONPlan key.JSONPlan = intrinsicOrCalculatedField.JSONPlan
key.Evolutions = intrinsicOrCalculatedField.Evolutions
return actions return actions
} }
@@ -205,6 +206,7 @@ func AdjustKey(key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemet
key.Indexes = matchingKey.Indexes key.Indexes = matchingKey.Indexes
key.Materialized = matchingKey.Materialized key.Materialized = matchingKey.Materialized
key.JSONPlan = matchingKey.JSONPlan key.JSONPlan = matchingKey.JSONPlan
key.Evolutions = matchingKey.Evolutions
return actions return actions
} else { } else {

View File

@@ -19,6 +19,8 @@ import (
func CollisionHandledFinalExpr( func CollisionHandledFinalExpr(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
field *telemetrytypes.TelemetryFieldKey, field *telemetrytypes.TelemetryFieldKey,
fm qbtypes.FieldMapper, fm qbtypes.FieldMapper,
cb qbtypes.ConditionBuilder, cb qbtypes.ConditionBuilder,
@@ -44,7 +46,7 @@ func CollisionHandledFinalExpr(
addCondition := func(key *telemetrytypes.TelemetryFieldKey) error { addCondition := func(key *telemetrytypes.TelemetryFieldKey) error {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
condition, err := cb.ConditionFor(ctx, key, qbtypes.FilterOperatorExists, nil, sb, 0, 0) condition, err := cb.ConditionFor(ctx, startNs, endNs, key, qbtypes.FilterOperatorExists, nil, sb)
if err != nil { if err != nil {
return err return err
} }
@@ -57,7 +59,7 @@ func CollisionHandledFinalExpr(
return nil return nil
} }
colName, fieldForErr := fm.FieldFor(ctx, field) colName, fieldForErr := fm.FieldFor(ctx, startNs, endNs, field)
if errors.Is(fieldForErr, qbtypes.ErrColumnNotFound) { if errors.Is(fieldForErr, qbtypes.ErrColumnNotFound) {
// the key didn't have the right context to be added to the query // the key didn't have the right context to be added to the query
// we try to use the context we know of // we try to use the context we know of
@@ -92,7 +94,7 @@ func CollisionHandledFinalExpr(
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
colName, _ = fm.FieldFor(ctx, key) colName, _ = fm.FieldFor(ctx, startNs, endNs, key)
colName, _ = DataTypeCollisionHandledFieldName(key, dummyValue, colName, qbtypes.FilterOperatorUnknown) colName, _ = DataTypeCollisionHandledFieldName(key, dummyValue, colName, qbtypes.FilterOperatorUnknown)
stmts = append(stmts, colName) stmts = append(stmts, colName)
} }

View File

@@ -44,12 +44,12 @@ func keyIndexFilter(key *telemetrytypes.TelemetryFieldKey) any {
func (b *defaultConditionBuilder) ConditionFor( func (b *defaultConditionBuilder) ConditionFor(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
op qbtypes.FilterOperator, op qbtypes.FilterOperator,
value any, value any,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
_ uint64,
_ uint64,
) (string, error) { ) (string, error) {
if key.FieldContext != telemetrytypes.FieldContextResource { if key.FieldContext != telemetrytypes.FieldContextResource {
@@ -60,15 +60,17 @@ func (b *defaultConditionBuilder) ConditionFor(
// as we store resource values as string // as we store resource values as string
formattedValue := querybuilder.FormatValueForContains(value) formattedValue := querybuilder.FormatValueForContains(value)
column, err := b.fm.ColumnFor(ctx, key) columns, err := b.fm.ColumnFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
// resource evolution on the main table doesn't affect this, as we are not changing the resource column in the resource fingerprint table.
column := columns[0]
keyIdxFilter := sb.Like(column.Name, keyIndexFilter(key)) keyIdxFilter := sb.Like(column.Name, keyIndexFilter(key))
valueForIndexFilter := valueForIndexFilter(op, key, value) valueForIndexFilter := valueForIndexFilter(op, key, value)
fieldName, err := b.fm.FieldFor(ctx, key) fieldName, err := b.fm.FieldFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -206,7 +206,7 @@ func TestConditionBuilder(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(context.Background(), tc.key, tc.op, tc.value, sb, 0, 0) cond, err := conditionBuilder.ConditionFor(context.Background(), 0, 0, tc.key, tc.op, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if tc.expectedErr != nil { if tc.expectedErr != nil {

View File

@@ -27,44 +27,48 @@ func NewFieldMapper() *defaultFieldMapper {
func (m *defaultFieldMapper) getColumn( func (m *defaultFieldMapper) getColumn(
_ context.Context, _ context.Context,
_, _ uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
) (*schema.Column, error) { ) ([]*schema.Column, error) {
if key.FieldContext == telemetrytypes.FieldContextResource { if key.FieldContext == telemetrytypes.FieldContextResource {
return resourceColumns["labels"], nil return []*schema.Column{resourceColumns["labels"]}, nil
} }
if col, ok := resourceColumns[key.Name]; ok { if col, ok := resourceColumns[key.Name]; ok {
return col, nil return []*schema.Column{col}, nil
} }
return nil, qbtypes.ErrColumnNotFound return nil, qbtypes.ErrColumnNotFound
} }
func (m *defaultFieldMapper) ColumnFor( func (m *defaultFieldMapper) ColumnFor(
ctx context.Context, ctx context.Context,
tsStart, tsEnd uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
) (*schema.Column, error) { ) ([]*schema.Column, error) {
return m.getColumn(ctx, key) return m.getColumn(ctx, tsStart, tsEnd, key)
} }
func (m *defaultFieldMapper) FieldFor( func (m *defaultFieldMapper) FieldFor(
ctx context.Context, ctx context.Context,
tsStart, tsEnd uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
column, err := m.getColumn(ctx, key) columns, err := m.getColumn(ctx, tsStart, tsEnd, key)
if err != nil { if err != nil {
return "", err return "", err
} }
if key.FieldContext == telemetrytypes.FieldContextResource { if key.FieldContext == telemetrytypes.FieldContextResource {
return fmt.Sprintf("simpleJSONExtractString(%s, '%s')", column.Name, key.Name), nil return fmt.Sprintf("simpleJSONExtractString(%s, '%s')", columns[0].Name, key.Name), nil
} }
return column.Name, nil return columns[0].Name, nil
} }
func (m *defaultFieldMapper) ColumnExpressionFor( func (m *defaultFieldMapper) ColumnExpressionFor(
ctx context.Context, ctx context.Context,
tsStart, tsEnd uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
_ map[string][]*telemetrytypes.TelemetryFieldKey, _ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
colName, err := m.FieldFor(ctx, key) colName, err := m.FieldFor(ctx, tsStart, tsEnd, key)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -148,7 +148,7 @@ func (b *resourceFilterStatementBuilder[T]) Build(
// addConditions adds both filter and time conditions to the query // addConditions adds both filter and time conditions to the query
func (b *resourceFilterStatementBuilder[T]) addConditions( func (b *resourceFilterStatementBuilder[T]) addConditions(
_ context.Context, ctx context.Context,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
start, end uint64, start, end uint64,
query qbtypes.QueryBuilderQuery[T], query qbtypes.QueryBuilderQuery[T],
@@ -160,6 +160,7 @@ func (b *resourceFilterStatementBuilder[T]) addConditions(
// warnings would be encountered as part of the main condition already // warnings would be encountered as part of the main condition already
filterWhereClause, err := querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ filterWhereClause, err := querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fieldMapper, FieldMapper: b.fieldMapper,
ConditionBuilder: b.conditionBuilder, ConditionBuilder: b.conditionBuilder,
@@ -171,7 +172,9 @@ func (b *resourceFilterStatementBuilder[T]) addConditions(
// there is no need for "key" not found error for resource filtering // there is no need for "key" not found error for resource filtering
IgnoreNotFoundKeys: true, IgnoreNotFoundKeys: true,
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return err return err

View File

@@ -23,6 +23,7 @@ const stringMatchingOperatorDocURL = "https://signoz.io/docs/userguide/operators
// filterExpressionVisitor implements the FilterQueryVisitor interface // filterExpressionVisitor implements the FilterQueryVisitor interface
// to convert the parsed filter expressions into ClickHouse WHERE clause // to convert the parsed filter expressions into ClickHouse WHERE clause
type filterExpressionVisitor struct { type filterExpressionVisitor struct {
context context.Context
logger *slog.Logger logger *slog.Logger
fieldMapper qbtypes.FieldMapper fieldMapper qbtypes.FieldMapper
conditionBuilder qbtypes.ConditionBuilder conditionBuilder qbtypes.ConditionBuilder
@@ -46,6 +47,7 @@ type filterExpressionVisitor struct {
} }
type FilterExprVisitorOpts struct { type FilterExprVisitorOpts struct {
Context context.Context
Logger *slog.Logger Logger *slog.Logger
FieldMapper qbtypes.FieldMapper FieldMapper qbtypes.FieldMapper
ConditionBuilder qbtypes.ConditionBuilder ConditionBuilder qbtypes.ConditionBuilder
@@ -65,6 +67,7 @@ type FilterExprVisitorOpts struct {
// newFilterExpressionVisitor creates a new filterExpressionVisitor // newFilterExpressionVisitor creates a new filterExpressionVisitor
func newFilterExpressionVisitor(opts FilterExprVisitorOpts) *filterExpressionVisitor { func newFilterExpressionVisitor(opts FilterExprVisitorOpts) *filterExpressionVisitor {
return &filterExpressionVisitor{ return &filterExpressionVisitor{
context: opts.Context,
logger: opts.Logger, logger: opts.Logger,
fieldMapper: opts.FieldMapper, fieldMapper: opts.FieldMapper,
conditionBuilder: opts.ConditionBuilder, conditionBuilder: opts.ConditionBuilder,
@@ -90,7 +93,7 @@ type PreparedWhereClause struct {
} }
// PrepareWhereClause generates a ClickHouse compatible WHERE clause from the filter query // PrepareWhereClause generates a ClickHouse compatible WHERE clause from the filter query
func PrepareWhereClause(query string, opts FilterExprVisitorOpts, startNs uint64, endNs uint64) (*PreparedWhereClause, error) { func PrepareWhereClause(query string, opts FilterExprVisitorOpts) (*PreparedWhereClause, error) {
// Setup the ANTLR parsing pipeline // Setup the ANTLR parsing pipeline
input := antlr.NewInputStream(query) input := antlr.NewInputStream(query)
@@ -124,8 +127,6 @@ func PrepareWhereClause(query string, opts FilterExprVisitorOpts, startNs uint64
} }
tokens.Reset() tokens.Reset()
opts.StartNs = startNs
opts.EndNs = endNs
visitor := newFilterExpressionVisitor(opts) visitor := newFilterExpressionVisitor(opts)
// Handle syntax errors // Handle syntax errors
@@ -317,7 +318,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
// create a full text search condition on the body field // create a full text search condition on the body field
keyText := keyCtx.GetText() keyText := keyCtx.GetText()
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(keyText), v.builder, v.startNs, v.endNs) cond, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(keyText), v.builder)
if err != nil { if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error())) v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
return "" return ""
@@ -337,7 +338,7 @@ func (v *filterExpressionVisitor) VisitPrimary(ctx *grammar.PrimaryContext) any
v.errors = append(v.errors, fmt.Sprintf("unsupported value type: %s", valCtx.GetText())) v.errors = append(v.errors, fmt.Sprintf("unsupported value type: %s", valCtx.GetText()))
return "" return ""
} }
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(text), v.builder, v.startNs, v.endNs) cond, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(text), v.builder)
if err != nil { if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error())) v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
return "" return ""
@@ -381,7 +382,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
} }
var conds []string var conds []string
for _, key := range keys { for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, nil, v.builder, v.startNs, v.endNs) condition, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, key, op, nil, v.builder)
if err != nil { if err != nil {
return "" return ""
} }
@@ -453,7 +454,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
} }
var conds []string var conds []string
for _, key := range keys { for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, values, v.builder, v.startNs, v.endNs) condition, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, key, op, values, v.builder)
if err != nil { if err != nil {
return "" return ""
} }
@@ -501,7 +502,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
var conds []string var conds []string
for _, key := range keys { for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, []any{value1, value2}, v.builder, v.startNs, v.endNs) condition, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, key, op, []any{value1, value2}, v.builder)
if err != nil { if err != nil {
return "" return ""
} }
@@ -586,7 +587,7 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
var conds []string var conds []string
for _, key := range keys { for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, value, v.builder, v.startNs, v.endNs) condition, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, key, op, value, v.builder)
if err != nil { if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build condition: %s", err.Error())) v.errors = append(v.errors, fmt.Sprintf("failed to build condition: %s", err.Error()))
return "" return ""
@@ -665,7 +666,7 @@ func (v *filterExpressionVisitor) VisitFullText(ctx *grammar.FullTextContext) an
v.errors = append(v.errors, "full text search is not supported") v.errors = append(v.errors, "full text search is not supported")
return "" return ""
} }
cond, err := v.conditionBuilder.ConditionFor(context.Background(), v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(text), v.builder, v.startNs, v.endNs) cond, err := v.conditionBuilder.ConditionFor(v.context, v.startNs, v.endNs, v.fullTextColumn, qbtypes.FilterOperatorRegexp, FormatFullTextSearch(text), v.builder)
if err != nil { if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error())) v.errors = append(v.errors, fmt.Sprintf("failed to build full text search condition: %s", err.Error()))
return "" return ""
@@ -750,13 +751,13 @@ func (v *filterExpressionVisitor) VisitFunctionCall(ctx *grammar.FunctionCallCon
if key.FieldContext == telemetrytypes.FieldContextBody { if key.FieldContext == telemetrytypes.FieldContextBody {
var err error var err error
if BodyJSONQueryEnabled { if BodyJSONQueryEnabled {
fieldName, err = v.fieldMapper.FieldFor(context.Background(), key) fieldName, err = v.fieldMapper.FieldFor(v.context, v.startNs, v.endNs, key)
if err != nil { if err != nil {
v.errors = append(v.errors, fmt.Sprintf("failed to get field name for key %s: %s", key.Name, err.Error())) v.errors = append(v.errors, fmt.Sprintf("failed to get field name for key %s: %s", key.Name, err.Error()))
return "" return ""
} }
} else { } else {
fieldName, _ = v.jsonKeyToKey(context.Background(), key, qbtypes.FilterOperatorUnknown, value) fieldName, _ = v.jsonKeyToKey(v.context, key, qbtypes.FilterOperatorUnknown, value)
} }
} else { } else {
// TODO(add docs for json body search) // TODO(add docs for json body search)

View File

@@ -1,6 +1,7 @@
package querybuilder package querybuilder
import ( import (
"context"
"log/slog" "log/slog"
"strings" "strings"
"testing" "testing"
@@ -54,11 +55,12 @@ func TestPrepareWhereClause_EmptyVariableList(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
opts := FilterExprVisitorOpts{ opts := FilterExprVisitorOpts{
Context: context.Background(),
FieldKeys: keys, FieldKeys: keys,
Variables: tt.variables, Variables: tt.variables,
} }
_, err := PrepareWhereClause(tt.expr, opts, 0, 0) _, err := PrepareWhereClause(tt.expr, opts)
if tt.expectError { if tt.expectError {
if err == nil { if err == nil {
@@ -467,7 +469,7 @@ func TestVisitKey(t *testing.T) {
expectedWarnings: nil, expectedWarnings: nil,
expectedMainWrnURL: "", expectedMainWrnURL: "",
}, },
{ {
name: "only attribute.custom_field is selected", name: "only attribute.custom_field is selected",
keyText: "attribute.attribute.custom_field", keyText: "attribute.attribute.custom_field",
fieldKeys: map[string][]*telemetrytypes.TelemetryFieldKey{ fieldKeys: map[string][]*telemetrytypes.TelemetryFieldKey{

View File

@@ -169,6 +169,7 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore), sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema), sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema), sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
) )
} }

View File

@@ -374,6 +374,7 @@ func New(
telemetrylogs.LogResourceKeysTblName, telemetrylogs.LogResourceKeysTblName,
telemetrymetadata.DBName, telemetrymetadata.DBName,
telemetrymetadata.AttributesMetadataLocalTableName, telemetrymetadata.AttributesMetadataLocalTableName,
telemetrymetadata.ColumnEvolutionMetadataTableName,
) )
global, err := factory.NewProviderFromNamedMap( global, err := factory.NewProviderFromNamedMap(

View File

@@ -0,0 +1,209 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/transition"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
// migrateRulesV4ToV5 is a SQL migration that rewrites stored alert rules from
// the pre-v5 format to the v5 format, consulting ClickHouse metadata to
// disambiguate attribute keys that exist in more than one field context.
type migrateRulesV4ToV5 struct {
	store          sqlstore.SQLStore             // relational store holding the "rule" table
	telemetryStore telemetrystore.TelemetryStore // ClickHouse access used to detect duplicate attribute keys
	logger         *slog.Logger
}
// NewMigrateRulesV4ToV5Factory returns the provider factory for the
// "migrate_rules_post_deprecation" SQL migration, wiring in the relational
// store and the telemetry (ClickHouse) store it needs.
func NewMigrateRulesV4ToV5Factory(
	store sqlstore.SQLStore,
	telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
	build := func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
		migration := &migrateRulesV4ToV5{
			logger:         ps.Logger,
			store:          store,
			telemetryStore: telemetryStore,
		}
		return migration, nil
	}
	return factory.NewProviderFactory(factory.MustNewName("migrate_rules_post_deprecation"), build)
}
// Register wires this migration's Up and Down steps into the provided
// migration registry, propagating any registration error to the caller.
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
	return migrations.Register(migration.Up, migration.Down)
}
// getLogDuplicateKeys returns log attribute key names that appear in BOTH the
// attribute-keys and resource-keys ClickHouse tables. These ambiguous keys
// need explicit field-context resolution when rules are migrated to v5.
//
// The lookup is deliberately best-effort: query, scan, and iteration failures
// are logged and swallowed (returning whatever was collected, or nil) so the
// SQL migration itself never fails because ClickHouse is unreachable.
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
	query := `
		SELECT name
		FROM (
			SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
			INTERSECT
			SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
		)
		ORDER BY name
	`
	rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
		return nil, nil
	}
	defer rows.Close()

	var keys []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
			continue
		}
		keys = append(keys, key)
	}
	// rows.Next can terminate early on an iteration error; surface it in the
	// logs but stay best-effort and return whatever keys were collected.
	if err := rows.Err(); err != nil {
		migration.logger.WarnContext(ctx, "failed to iterate log duplicate keys", "error", err)
	}
	return keys, nil
}
// getTraceDuplicateKeys returns span attribute keys that exist with more than
// one tagType ('tag' and 'resource') in ClickHouse — i.e. keys whose field
// context is ambiguous and must be resolved during the v5 rule migration.
//
// Like getLogDuplicateKeys, this is best-effort: failures are logged and
// swallowed so the SQL migration can proceed regardless.
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
	query := `
		SELECT tagKey
		FROM signoz_traces.distributed_span_attributes_keys
		WHERE tagType IN ('tag', 'resource')
		GROUP BY tagKey
		HAVING COUNT(DISTINCT tagType) > 1
		ORDER BY tagKey
	`
	rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
	if err != nil {
		migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
		return nil, nil
	}
	defer rows.Close()

	var keys []string
	for rows.Next() {
		var key string
		if err := rows.Scan(&key); err != nil {
			migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
			continue
		}
		keys = append(keys, key)
	}
	// rows.Next can terminate early on an iteration error; surface it in the
	// logs but stay best-effort and return whatever keys were collected.
	if err := rows.Err(); err != nil {
		migration.logger.WarnContext(ctx, "failed to iterate trace duplicate keys", "error", err)
	}
	return keys, nil
}
// hasQueriesEnvelope reports whether the rule payload already carries a
// non-empty v5 "queries" envelope under condition.compositeQuery.
func hasQueriesEnvelope(data map[string]any) bool {
	condition, ok := data["condition"].(map[string]any)
	if !ok {
		return false
	}
	compositeQuery, ok := condition["compositeQuery"].(map[string]any)
	if !ok {
		return false
	}
	queries, ok := compositeQuery["queries"].([]any)
	return ok && len(queries) > 0
}

// Up migrates every stored alert rule that is not already on version v5.
//
// Per rule:
//   - version == "v5": skipped.
//   - payload already has a non-empty queries envelope: only the version field
//     is bumped (the user picked the wrong version for new-format data).
//   - otherwise: the full v4 -> v5 conversion runs via transition.NewAlertMigrateV5;
//     if the migrator reports no update, the rule is left untouched rather than
//     persisting a half-migrated payload.
//
// Duplicate attribute keys from logs and traces are collected first so the
// migrator can disambiguate field contexts. All rule updates happen inside a
// single transaction; any marshal/update error aborts and rolls back.
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
	logsKeys, err := migration.getLogDuplicateKeys(ctx)
	if err != nil {
		return err
	}
	tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
	if err != nil {
		return err
	}

	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback is a no-op after a successful Commit; the error is ignored on purpose.
	defer func() {
		_ = tx.Rollback()
	}()

	var rules []struct {
		ID   string         `bun:"id"`
		Data map[string]any `bun:"data"`
	}
	err = tx.NewSelect().
		Table("rule").
		Column("id", "data").
		Scan(ctx, &rules)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)

	count := 0
	for _, rule := range rules {
		version, _ := rule.Data["version"].(string)
		if version == "v5" {
			continue
		}
		if version == "" {
			migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
		}
		migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)

		if hasQueriesEnvelope(rule.Data) {
			// already has queries envelope, just bump version
			// this is because user made a mistake of choosing version
			migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
			rule.Data["version"] = "v5"
		} else {
			// old format, run full migration
			migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
			updated := alertsMigrator.Migrate(ctx, rule.Data)
			if !updated {
				migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
				continue
			}
			rule.Data["version"] = "v5"
		}

		dataJSON, err := json.Marshal(rule.Data)
		if err != nil {
			return err
		}
		_, err = tx.NewUpdate().
			Table("rule").
			Set("data = ?", string(dataJSON)).
			Where("id = ?", rule.ID).
			Exec(ctx)
		if err != nil {
			return err
		}
		count++
	}

	if count != 0 {
		migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
	}
	return tx.Commit()
}
// Down is intentionally a no-op: the v4 -> v5 rule rewrite is one-way and is
// not reversed.
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
	return nil
}

View File

@@ -25,30 +25,34 @@ func NewConditionBuilder(fm qbtypes.FieldMapper) *conditionBuilder {
func (c *conditionBuilder) conditionFor( func (c *conditionBuilder) conditionFor(
ctx context.Context, ctx context.Context,
startNs, endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
) (string, error) { ) (string, error) {
column, err := c.fm.ColumnFor(ctx, key) columns, err := c.fm.ColumnFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
if column.IsJSONColumn() && querybuilder.BodyJSONQueryEnabled { // TODO(Piyush): Update this to support multiple JSON columns based on evolutions
valueType, value := InferDataType(value, operator, key) for _, column := range columns {
cond, err := NewJSONConditionBuilder(key, valueType).buildJSONCondition(operator, value, sb) if column.IsJSONColumn() && querybuilder.BodyJSONQueryEnabled {
if err != nil { valueType, value := InferDataType(value, operator, key)
return "", err cond, err := NewJSONConditionBuilder(key, valueType).buildJSONCondition(operator, value, sb)
if err != nil {
return "", err
}
return cond, nil
} }
return cond, nil
} }
if operator.IsStringSearchOperator() { if operator.IsStringSearchOperator() {
value = querybuilder.FormatValueForContains(value) value = querybuilder.FormatValueForContains(value)
} }
tblFieldName, err := c.fm.FieldFor(ctx, key) tblFieldName, err := c.fm.FieldFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -174,6 +178,31 @@ func (c *conditionBuilder) conditionFor(
} }
var value any var value any
column := columns[0]
if len(key.Evolutions) > 0 {
// we will use the corresponding column and its evolution entry for the query
newColumns, _, err := selectEvolutionsForColumns(columns, key.Evolutions, startNs, endNs)
if err != nil {
return "", err
}
if len(newColumns) == 0 {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "no valid evolution found for field %s in the given time range", key.Name)
}
// This mean tblFieldName is with multiIf, we just need to do a null check.
if len(newColumns) > 1 {
if operator == qbtypes.FilterOperatorExists {
return sb.IsNotNull(tblFieldName), nil
} else {
return sb.IsNull(tblFieldName), nil
}
}
// otherwise we have to find the correct exist operator based on the column type
column = newColumns[0]
}
switch column.Type.GetType() { switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON: case schema.ColumnTypeEnumJSON:
if operator == qbtypes.FilterOperatorExists { if operator == qbtypes.FilterOperatorExists {
@@ -228,6 +257,7 @@ func (c *conditionBuilder) conditionFor(
} }
default: default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", column.Type) return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", column.Type)
} }
} }
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported operator: %v", operator) return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported operator: %v", operator)
@@ -235,14 +265,15 @@ func (c *conditionBuilder) conditionFor(
func (c *conditionBuilder) ConditionFor( func (c *conditionBuilder) ConditionFor(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
_ uint64,
_ uint64,
) (string, error) { ) (string, error) {
condition, err := c.conditionFor(ctx, key, operator, value, sb)
condition, err := c.conditionFor(ctx, startNs, endNs, key, operator, value, sb)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -250,12 +281,12 @@ func (c *conditionBuilder) ConditionFor(
if !(key.FieldContext == telemetrytypes.FieldContextBody && querybuilder.BodyJSONQueryEnabled) && operator.AddDefaultExistsFilter() { if !(key.FieldContext == telemetrytypes.FieldContextBody && querybuilder.BodyJSONQueryEnabled) && operator.AddDefaultExistsFilter() {
// skip adding exists filter for intrinsic fields // skip adding exists filter for intrinsic fields
// with an exception for body json search // with an exception for body json search
field, _ := c.fm.FieldFor(ctx, key) field, _ := c.fm.FieldFor(ctx, startNs, endNs, key)
if slices.Contains(maps.Keys(IntrinsicFields), field) && key.FieldContext != telemetrytypes.FieldContextBody { if slices.Contains(maps.Keys(IntrinsicFields), field) && key.FieldContext != telemetrytypes.FieldContextBody {
return condition, nil return condition, nil
} }
existsCondition, err := c.conditionFor(ctx, key, qbtypes.FilterOperatorExists, nil, sb) existsCondition, err := c.conditionFor(ctx, startNs, endNs, key, qbtypes.FilterOperatorExists, nil, sb)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -3,6 +3,7 @@ package telemetrylogs
import ( import (
"context" "context"
"testing" "testing"
"time"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes" "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -11,14 +12,148 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
// TestExistsConditionForWithEvolutions verifies that the Exists operator
// resolves to the correct column — the old map column, the new JSON column,
// or a multiIf spanning both — depending on where the query time range falls
// relative to each evolution entry's ReleaseTime.
func TestExistsConditionForWithEvolutions(t *testing.T) {
	testCases := []struct {
		name          string
		startTs       uint64 // query range start, nanoseconds
		endTs         uint64 // query range end, nanoseconds
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedArgs  []any
		expectedError error
	}{
		{
			// Range starts after the JSON column's release: only the new column applies.
			name:    "New column",
			startTs: uint64(time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC).UnixNano()),
			endTs:   uint64(time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC).UnixNano()),
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  telemetrytypes.FieldContextResource,
				FieldDataType: telemetrytypes.FieldDataTypeString,
				Evolutions: []*telemetrytypes.EvolutionEntry{
					{
						Signal:       telemetrytypes.SignalLogs,
						ColumnName:   "resources_string",
						FieldContext: telemetrytypes.FieldContextResource,
						ColumnType:   "Map(LowCardinality(String), String)",
						FieldName:    "__all__",
						ReleaseTime:  time.Unix(0, 0),
					},
					{
						Signal:       telemetrytypes.SignalLogs,
						ColumnName:   "resource",
						ColumnType:   "JSON()",
						FieldContext: telemetrytypes.FieldContextResource,
						FieldName:    "__all__",
						ReleaseTime:  time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
					},
				},
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "WHERE resource.`service.name`::String IS NOT NULL",
			expectedError: nil,
		},
		{
			// Range ends before the JSON column's release: only the old map column applies.
			name:    "Old column",
			startTs: uint64(time.Date(2023, 1, 15, 10, 0, 0, 0, time.UTC).UnixNano()),
			endTs:   uint64(time.Date(2023, 1, 15, 10, 0, 0, 0, time.UTC).UnixNano()),
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  telemetrytypes.FieldContextResource,
				FieldDataType: telemetrytypes.FieldDataTypeString,
				Evolutions: []*telemetrytypes.EvolutionEntry{
					{
						Signal:       telemetrytypes.SignalLogs,
						ColumnName:   "resources_string",
						FieldContext: telemetrytypes.FieldContextResource,
						ColumnType:   "Map(LowCardinality(String), String)",
						FieldName:    "__all__",
						ReleaseTime:  time.Unix(0, 0),
					},
					{
						Signal:       telemetrytypes.SignalLogs,
						ColumnName:   "resource",
						ColumnType:   "JSON()",
						FieldContext: telemetrytypes.FieldContextResource,
						FieldName:    "__all__",
						ReleaseTime:  time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
					},
				},
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "WHERE mapContains(resources_string, 'service.name') = ?",
			expectedArgs:  []any{true},
			expectedError: nil,
		},
		{
			// Range straddles the release: both columns apply, so a multiIf is emitted.
			name:    "Both Old column and new - empty filter",
			startTs: uint64(time.Date(2023, 1, 15, 10, 0, 0, 0, time.UTC).UnixNano()),
			endTs:   uint64(time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC).UnixNano()),
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  telemetrytypes.FieldContextResource,
				FieldDataType: telemetrytypes.FieldDataTypeString,
				Evolutions: []*telemetrytypes.EvolutionEntry{
					{
						Signal:       telemetrytypes.SignalLogs,
						ColumnName:   "resources_string",
						FieldContext: telemetrytypes.FieldContextResource,
						ColumnType:   "Map(LowCardinality(String), String)",
						FieldName:    "__all__",
						ReleaseTime:  time.Unix(0, 0),
					},
					{
						Signal:       telemetrytypes.SignalLogs,
						ColumnName:   "resource",
						ColumnType:   "JSON()",
						FieldContext: telemetrytypes.FieldContextResource,
						FieldName:    "__all__",
						ReleaseTime:  time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
					},
				},
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "WHERE multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL",
			expectedError: nil,
		},
	}

	fm := NewFieldMapper()
	conditionBuilder := NewConditionBuilder(fm)
	ctx := context.Background()

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.ConditionFor(ctx, tc.startTs, tc.endTs, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)
			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
				assert.Equal(t, tc.expectedArgs, args)
			}
		})
	}
}
func TestConditionFor(t *testing.T) { func TestConditionFor(t *testing.T) {
ctx := context.Background() ctx := context.Background()
mockEvolution := mockEvolutionData(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC))
testCases := []struct { testCases := []struct {
name string name string
key telemetrytypes.TelemetryFieldKey key telemetrytypes.TelemetryFieldKey
operator qbtypes.FilterOperator operator qbtypes.FilterOperator
value any value any
evolutions []*telemetrytypes.EvolutionEntry
expectedSQL string expectedSQL string
expectedArgs []any expectedArgs []any
expectedError error expectedError error
@@ -240,9 +375,11 @@ func TestConditionFor(t *testing.T) {
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
}, },
evolutions: mockEvolution,
operator: qbtypes.FilterOperatorExists, operator: qbtypes.FilterOperatorExists,
value: nil, value: nil,
expectedSQL: "WHERE multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL", expectedSQL: "mapContains(resources_string, 'service.name') = ?",
expectedArgs: []any{true},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -252,9 +389,11 @@ func TestConditionFor(t *testing.T) {
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
}, },
evolutions: mockEvolution,
operator: qbtypes.FilterOperatorNotExists, operator: qbtypes.FilterOperatorNotExists,
value: nil, value: nil,
expectedSQL: "WHERE multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NULL", expectedSQL: "mapContains(resources_string, 'service.name') <> ?",
expectedArgs: []any{true},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -315,10 +454,11 @@ func TestConditionFor(t *testing.T) {
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
Materialized: true, Materialized: true,
}, },
evolutions: mockEvolution,
operator: qbtypes.FilterOperatorRegexp, operator: qbtypes.FilterOperatorRegexp,
value: "frontend-.*", value: "frontend-.*",
expectedSQL: "(match(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, `resource_string_service$$name_exists`==true, `resource_string_service$$name`, NULL), ?) AND multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, `resource_string_service$$name_exists`==true, `resource_string_service$$name`, NULL) IS NOT NULL)", expectedSQL: "WHERE (match(`resource_string_service$$name`, ?) AND `resource_string_service$$name_exists` = ?)",
expectedArgs: []any{"frontend-.*"}, expectedArgs: []any{"frontend-.*", true},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -329,9 +469,10 @@ func TestConditionFor(t *testing.T) {
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
Materialized: true, Materialized: true,
}, },
evolutions: mockEvolution,
operator: qbtypes.FilterOperatorNotRegexp, operator: qbtypes.FilterOperatorNotRegexp,
value: "test-.*", value: "test-.*",
expectedSQL: "WHERE NOT match(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, `resource_string_service$$name_exists`==true, `resource_string_service$$name`, NULL), ?)", expectedSQL: "WHERE NOT match(`resource_string_service$$name`, ?)",
expectedArgs: []any{"test-.*"}, expectedArgs: []any{"test-.*"},
expectedError: nil, expectedError: nil,
}, },
@@ -371,14 +512,13 @@ func TestConditionFor(t *testing.T) {
expectedError: qbtypes.ErrColumnNotFound, expectedError: qbtypes.ErrColumnNotFound,
}, },
} }
fm := NewFieldMapper() fm := NewFieldMapper()
conditionBuilder := NewConditionBuilder(fm) conditionBuilder := NewConditionBuilder(fm)
for _, tc := range testCases { for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(ctx, &tc.key, tc.operator, tc.value, sb, 0, 0) tc.key.Evolutions = tc.evolutions
cond, err := conditionBuilder.ConditionFor(ctx, 0, 0, &tc.key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if tc.expectedError != nil { if tc.expectedError != nil {
@@ -433,7 +573,7 @@ func TestConditionForMultipleKeys(t *testing.T) {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
var err error var err error
for _, key := range tc.keys { for _, key := range tc.keys {
cond, err := conditionBuilder.ConditionFor(ctx, &key, tc.operator, tc.value, sb, 0, 0) cond, err := conditionBuilder.conditionFor(ctx, 0, 0, &key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if err != nil { if err != nil {
t.Fatalf("Error getting condition for key %s: %v", key.Name, err) t.Fatalf("Error getting condition for key %s: %v", key.Name, err)
@@ -690,7 +830,7 @@ func TestConditionForJSONBodySearch(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(ctx, &tc.key, tc.operator, tc.value, sb, 0, 0) cond, err := conditionBuilder.conditionFor(ctx, 0, 0, &tc.key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if tc.expectedError != nil { if tc.expectedError != nil {

View File

@@ -3,7 +3,10 @@ package telemetrylogs
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"strconv"
"strings" "strings"
"time"
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator" schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
"github.com/SigNoz/signoz-otel-collector/utils" "github.com/SigNoz/signoz-otel-collector/utils"
@@ -61,40 +64,42 @@ var (
} }
) )
type fieldMapper struct {} type fieldMapper struct{}
func NewFieldMapper() qbtypes.FieldMapper { func NewFieldMapper() qbtypes.FieldMapper {
return &fieldMapper{} return &fieldMapper{}
} }
func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
switch key.FieldContext { switch key.FieldContext {
case telemetrytypes.FieldContextResource: case telemetrytypes.FieldContextResource:
return logsV2Columns["resource"], nil columns := []*schema.Column{logsV2Columns["resources_string"], logsV2Columns["resource"]}
return columns, nil
case telemetrytypes.FieldContextScope: case telemetrytypes.FieldContextScope:
switch key.Name { switch key.Name {
case "name", "scope.name", "scope_name": case "name", "scope.name", "scope_name":
return logsV2Columns["scope_name"], nil return []*schema.Column{logsV2Columns["scope_name"]}, nil
case "version", "scope.version", "scope_version": case "version", "scope.version", "scope_version":
return logsV2Columns["scope_version"], nil return []*schema.Column{logsV2Columns["scope_version"]}, nil
} }
return logsV2Columns["scope_string"], nil return []*schema.Column{logsV2Columns["scope_string"]}, nil
case telemetrytypes.FieldContextAttribute: case telemetrytypes.FieldContextAttribute:
switch key.FieldDataType { switch key.FieldDataType {
case telemetrytypes.FieldDataTypeString: case telemetrytypes.FieldDataTypeString:
return logsV2Columns["attributes_string"], nil return []*schema.Column{logsV2Columns["attributes_string"]}, nil
case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber: case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
return logsV2Columns["attributes_number"], nil return []*schema.Column{logsV2Columns["attributes_number"]}, nil
case telemetrytypes.FieldDataTypeBool: case telemetrytypes.FieldDataTypeBool:
return logsV2Columns["attributes_bool"], nil return []*schema.Column{logsV2Columns["attributes_bool"]}, nil
} }
case telemetrytypes.FieldContextBody: case telemetrytypes.FieldContextBody:
// Body context is for JSON body fields // Body context is for JSON body fields
// Use body_json if feature flag is enabled // Use body_json if feature flag is enabled
if querybuilder.BodyJSONQueryEnabled { if querybuilder.BodyJSONQueryEnabled {
return logsV2Columns[LogsV2BodyJSONColumn], nil return []*schema.Column{logsV2Columns[LogsV2BodyJSONColumn]}, nil
} }
// Fall back to legacy body column // Fall back to legacy body column
return logsV2Columns["body"], nil return []*schema.Column{logsV2Columns["body"]}, nil
case telemetrytypes.FieldContextLog, telemetrytypes.FieldContextUnspecified: case telemetrytypes.FieldContextLog, telemetrytypes.FieldContextUnspecified:
col, ok := logsV2Columns[key.Name] col, ok := logsV2Columns[key.Name]
if !ok { if !ok {
@@ -102,96 +107,237 @@ func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.Telemetry
if strings.HasPrefix(key.Name, telemetrytypes.BodyJSONStringSearchPrefix) { if strings.HasPrefix(key.Name, telemetrytypes.BodyJSONStringSearchPrefix) {
// Use body_json if feature flag is enabled and we have a body condition builder // Use body_json if feature flag is enabled and we have a body condition builder
if querybuilder.BodyJSONQueryEnabled { if querybuilder.BodyJSONQueryEnabled {
return logsV2Columns[LogsV2BodyJSONColumn], nil // TODO(Piyush): Update this to support multiple JSON columns based on evolutions
// i.e return both the body json and body json promoted and let the evolutions decide which one to use
// based on the query range time.
return []*schema.Column{logsV2Columns[LogsV2BodyJSONColumn]}, nil
} }
// Fall back to legacy body column // Fall back to legacy body column
return logsV2Columns["body"], nil return []*schema.Column{logsV2Columns["body"]}, nil
} }
return nil, qbtypes.ErrColumnNotFound return nil, qbtypes.ErrColumnNotFound
} }
return col, nil return []*schema.Column{col}, nil
} }
return nil, qbtypes.ErrColumnNotFound return nil, qbtypes.ErrColumnNotFound
} }
func (m *fieldMapper) FieldFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) { // selectEvolutionsForColumns selects the appropriate evolution entries for each column based on the time range.
column, err := m.getColumn(ctx, key) // Logic:
// - Finds the latest base evolution (<= tsStartTime) across ALL columns
// - Rejects all evolutions before this latest base evolution
// - For duplicate evolutions it considers the oldest one (first in ReleaseTime)
// - For each column, includes its evolution if it's >= latest base evolution and <= tsEndTime
// - Results are sorted by ReleaseTime descending (newest first)
// selectEvolutionsForColumns selects the appropriate evolution entries for the
// given columns based on the query time range [tsStart, tsEnd) (nanoseconds).
//
// Logic:
//   - Finds the latest base evolution (ReleaseTime <= tsStart) across ALL columns.
//   - Rejects all evolutions released before that base evolution.
//   - For duplicate evolutions (same column/field/version) the oldest one wins.
//   - Evolutions released at or after tsEnd are excluded.
//   - If nothing survives, falls back to the base evolution's column.
//   - Results are sorted by ReleaseTime descending (newest first), with the
//     returned columns and entries aligned index-by-index.
func selectEvolutionsForColumns(columns []*schema.Column, evolutions []*telemetrytypes.EvolutionEntry, tsStart, tsEnd uint64) ([]*schema.Column, []*telemetrytypes.EvolutionEntry, error) {
	sortedEvolutions := make([]*telemetrytypes.EvolutionEntry, len(evolutions))
	copy(sortedEvolutions, evolutions)
	// sort the evolutions by ReleaseTime ascending
	sort.Slice(sortedEvolutions, func(i, j int) bool {
		return sortedEvolutions[i].ReleaseTime.Before(sortedEvolutions[j].ReleaseTime)
	})

	tsStartTime := time.Unix(0, int64(tsStart))
	tsEndTime := time.Unix(0, int64(tsEnd))

	// Build evolution map: column:field:version -> evolution. Entries are
	// sorted ascending, so the first (oldest) duplicate wins.
	evolutionMap := make(map[string]*telemetrytypes.EvolutionEntry, len(sortedEvolutions))
	for _, evolution := range sortedEvolutions {
		mapKey := evolution.ColumnName + ":" + evolution.FieldName + ":" + strconv.Itoa(int(evolution.Version))
		if _, exists := evolutionMap[mapKey]; exists {
			continue
		}
		evolutionMap[mapKey] = evolution
	}

	// Find the latest base evolution (<= tsStartTime) across ALL columns.
	// Evolutions are sorted ascending, so we can break early.
	var latestBaseEvolutionAcrossAll *telemetrytypes.EvolutionEntry
	for _, evolution := range sortedEvolutions {
		if evolution.ReleaseTime.After(tsStartTime) {
			break
		}
		latestBaseEvolutionAcrossAll = evolution
	}
	// We shouldn't reach this; it means the evolutions data itself is inconsistent.
	if latestBaseEvolutionAcrossAll == nil {
		return nil, nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "no base evolution found for columns %v", columns)
	}

	columnLookUpMap := make(map[string]*schema.Column, len(columns))
	for _, column := range columns {
		columnLookUpMap[column.Name] = column
	}

	// Collect column-evolution pairs that fall inside the accepted window.
	type colEvoPair struct {
		column    *schema.Column
		evolution *telemetrytypes.EvolutionEntry
	}
	pairs := []colEvoPair{}
	for _, evolution := range evolutionMap {
		// Reject evolutions before the latest base evolution.
		if evolution.ReleaseTime.Before(latestBaseEvolutionAcrossAll.ReleaseTime) {
			continue
		}
		// Skip evolutions released at or after tsEndTime.
		if !evolution.ReleaseTime.Before(tsEndTime) {
			continue
		}
		column, exists := columnLookUpMap[evolution.ColumnName]
		if !exists {
			return nil, nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "evolution column %s not found in columns %v", evolution.ColumnName, columns)
		}
		pairs = append(pairs, colEvoPair{column, evolution})
	}

	// If no pairs found, fall back to latestBaseEvolutionAcrossAll for matching columns.
	if len(pairs) == 0 {
		for _, column := range columns {
			if column.Name == latestBaseEvolutionAcrossAll.ColumnName {
				pairs = append(pairs, colEvoPair{column, latestBaseEvolutionAcrossAll})
			}
		}
	}

	// Sort by ReleaseTime descending (newest first).
	sort.Slice(pairs, func(i, j int) bool {
		return pairs[j].evolution.ReleaseTime.Before(pairs[i].evolution.ReleaseTime)
	})

	newColumns := make([]*schema.Column, len(pairs))
	evolutionsEntries := make([]*telemetrytypes.EvolutionEntry, len(pairs))
	for i, pair := range pairs {
		newColumns[i] = pair.column
		evolutionsEntries[i] = pair.evolution
	}
	return newColumns, evolutionsEntries, nil
}
func (m *fieldMapper) FieldFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey) (string, error) {
columns, err := m.getColumn(ctx, key)
if err != nil { if err != nil {
return "", err return "", err
} }
switch column.Type.GetType() { var newColumns []*schema.Column
case schema.ColumnTypeEnumJSON: var evolutionsEntries []*telemetrytypes.EvolutionEntry
// json is only supported for resource context as of now if len(key.Evolutions) > 0 {
switch key.FieldContext { // we will use the corresponding column and its evolution entry for the query
case telemetrytypes.FieldContextResource: newColumns, evolutionsEntries, err = selectEvolutionsForColumns(columns, key.Evolutions, tsStart, tsEnd)
oldColumn := logsV2Columns["resources_string"] if err != nil {
oldKeyName := fmt.Sprintf("%s['%s']", oldColumn.Name, key.Name) return "", err
// have to add ::string as clickHouse throws an error :- data types Variant/Dynamic are not allowed in GROUP BY
// once clickHouse dependency is updated, we need to check if we can remove it.
if key.Materialized {
oldKeyName = telemetrytypes.FieldKeyToMaterializedColumnName(key)
oldKeyNameExists := telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
return fmt.Sprintf("multiIf(%s.`%s` IS NOT NULL, %s.`%s`::String, %s==true, %s, NULL)", column.Name, key.Name, column.Name, key.Name, oldKeyNameExists, oldKeyName), nil
}
return fmt.Sprintf("multiIf(%s.`%s` IS NOT NULL, %s.`%s`::String, mapContains(%s, '%s'), %s, NULL)", column.Name, key.Name, column.Name, key.Name, oldColumn.Name, key.Name, oldKeyName), nil
case telemetrytypes.FieldContextBody:
if key.JSONDataType == nil {
return "", qbtypes.ErrColumnNotFound
}
if key.KeyNameContainsArray() && !key.JSONDataType.IsArray {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "FieldFor not supported for nested fields; only supported for flat paths (e.g. body.status.detail) and paths of Array type: %s(%s)", key.Name, key.FieldDataType)
}
return m.buildFieldForJSON(key)
default:
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource/body context fields are supported for json columns, got %s", key.FieldContext.String)
} }
case schema.ColumnTypeEnumLowCardinality: } else {
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() { newColumns = columns
case schema.ColumnTypeEnumString: }
exprs := []string{}
existExpr := []string{}
for i, column := range newColumns {
// Use evolution column name if available, otherwise use the column name
columnName := column.Name
if evolutionsEntries != nil && evolutionsEntries[i] != nil {
columnName = evolutionsEntries[i].ColumnName
}
switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON:
switch key.FieldContext {
case telemetrytypes.FieldContextResource:
exprs = append(exprs, fmt.Sprintf("%s.`%s`::String", columnName, key.Name))
existExpr = append(existExpr, fmt.Sprintf("%s.`%s` IS NOT NULL", columnName, key.Name))
case telemetrytypes.FieldContextBody:
if key.JSONDataType == nil {
return "", qbtypes.ErrColumnNotFound
}
if key.KeyNameContainsArray() && !key.JSONDataType.IsArray {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "FieldFor not supported for nested fields; only supported for flat paths (e.g. body.status.detail) and paths of Array type: %s(%s)", key.Name, key.FieldDataType)
}
expr, err := m.buildFieldForJSON(key)
if err != nil {
return "", err
}
exprs = append(exprs, expr)
default:
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource/body context fields are supported for json columns, got %s", key.FieldContext.String)
}
case schema.ColumnTypeEnumLowCardinality:
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
case schema.ColumnTypeEnumString:
exprs = append(exprs, column.Name)
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for low cardinality column type %s", elementType)
}
case schema.ColumnTypeEnumString,
schema.ColumnTypeEnumUInt64, schema.ColumnTypeEnumUInt32, schema.ColumnTypeEnumUInt8:
return column.Name, nil return column.Name, nil
default: case schema.ColumnTypeEnumMap:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for low cardinality column type %s", elementType) keyType := column.Type.(schema.MapColumnType).KeyType
} if _, ok := keyType.(schema.LowCardinalityColumnType); !ok {
case schema.ColumnTypeEnumString, return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, column.Type)
schema.ColumnTypeEnumUInt64, schema.ColumnTypeEnumUInt32, schema.ColumnTypeEnumUInt8: }
return column.Name, nil
case schema.ColumnTypeEnumMap: switch valueType := column.Type.(schema.MapColumnType).ValueType; valueType.GetType() {
keyType := column.Type.(schema.MapColumnType).KeyType case schema.ColumnTypeEnumString, schema.ColumnTypeEnumBool, schema.ColumnTypeEnumFloat64:
if _, ok := keyType.(schema.LowCardinalityColumnType); !ok { // a key could have been materialized, if so return the materialized column name
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, column.Type) if key.Materialized {
} return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
}
switch valueType := column.Type.(schema.MapColumnType).ValueType; valueType.GetType() { exprs = append(exprs, fmt.Sprintf("%s['%s']", columnName, key.Name))
case schema.ColumnTypeEnumString, schema.ColumnTypeEnumBool, schema.ColumnTypeEnumFloat64: existExpr = append(existExpr, fmt.Sprintf("mapContains(%s, '%s')", columnName, key.Name))
// a key could have been materialized, if so return the materialized column name default:
if key.Materialized { return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for map column type %s", valueType)
return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
} }
return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for map column type %s", valueType)
} }
} }
if len(exprs) == 1 {
return exprs[0], nil
} else if len(exprs) > 1 {
// Ensure existExpr has the same length as exprs
if len(existExpr) != len(exprs) {
return "", errors.New(errors.TypeInternal, errors.CodeInternal, "length of exist exprs doesn't match to that of exprs")
}
finalExprs := []string{}
for i, expr := range exprs {
finalExprs = append(finalExprs, fmt.Sprintf("%s, %s", existExpr[i], expr))
}
return "multiIf(" + strings.Join(finalExprs, ", ") + ", NULL)", nil
}
// should not reach here // should not reach here
return column.Name, nil return columns[0].Name, nil
} }
func (m *fieldMapper) ColumnFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) { func (m *fieldMapper) ColumnFor(ctx context.Context, _, _ uint64, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
return m.getColumn(ctx, key) return m.getColumn(ctx, key)
} }
func (m *fieldMapper) ColumnExpressionFor( func (m *fieldMapper) ColumnExpressionFor(
ctx context.Context, ctx context.Context,
tsStart, tsEnd uint64,
field *telemetrytypes.TelemetryFieldKey, field *telemetrytypes.TelemetryFieldKey,
keys map[string][]*telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
colName, err := m.FieldFor(ctx, field) colName, err := m.FieldFor(ctx, tsStart, tsEnd, field)
if errors.Is(err, qbtypes.ErrColumnNotFound) { if errors.Is(err, qbtypes.ErrColumnNotFound) {
// the key didn't have the right context to be added to the query // the key didn't have the right context to be added to the query
// we try to use the context we know of // we try to use the context we know of
@@ -201,7 +347,7 @@ func (m *fieldMapper) ColumnExpressionFor(
if _, ok := logsV2Columns[field.Name]; ok { if _, ok := logsV2Columns[field.Name]; ok {
// if it is, attach the column name directly // if it is, attach the column name directly
field.FieldContext = telemetrytypes.FieldContextLog field.FieldContext = telemetrytypes.FieldContextLog
colName, _ = m.FieldFor(ctx, field) colName, _ = m.FieldFor(ctx, tsStart, tsEnd, field)
} else { } else {
// - the context is not provided // - the context is not provided
// - there are not keys for the field // - there are not keys for the field
@@ -219,12 +365,12 @@ func (m *fieldMapper) ColumnExpressionFor(
} }
} else if len(keysForField) == 1 { } else if len(keysForField) == 1 {
// we have a single key for the field, use it // we have a single key for the field, use it
colName, _ = m.FieldFor(ctx, keysForField[0]) colName, _ = m.FieldFor(ctx, tsStart, tsEnd, keysForField[0])
} else { } else {
// select any non-empty value from the keys // select any non-empty value from the keys
args := []string{} args := []string{}
for _, key := range keysForField { for _, key := range keysForField {
colName, _ = m.FieldFor(ctx, key) colName, _ = m.FieldFor(ctx, tsStart, tsEnd, key)
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName)) args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
} }
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", ")) colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))

View File

@@ -3,6 +3,7 @@ package telemetrylogs
import ( import (
"context" "context"
"testing" "testing"
"time"
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator" schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
@@ -17,7 +18,7 @@ func TestGetColumn(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
key telemetrytypes.TelemetryFieldKey key telemetrytypes.TelemetryFieldKey
expectedCol *schema.Column expectedCol []*schema.Column
expectedError error expectedError error
}{ }{
{ {
@@ -26,7 +27,7 @@ func TestGetColumn(t *testing.T) {
Name: "service.name", Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
}, },
expectedCol: logsV2Columns["resource"], expectedCol: []*schema.Column{logsV2Columns["resources_string"], logsV2Columns["resource"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -35,7 +36,7 @@ func TestGetColumn(t *testing.T) {
Name: "name", Name: "name",
FieldContext: telemetrytypes.FieldContextScope, FieldContext: telemetrytypes.FieldContextScope,
}, },
expectedCol: logsV2Columns["scope_name"], expectedCol: []*schema.Column{logsV2Columns["scope_name"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -44,7 +45,7 @@ func TestGetColumn(t *testing.T) {
Name: "scope.name", Name: "scope.name",
FieldContext: telemetrytypes.FieldContextScope, FieldContext: telemetrytypes.FieldContextScope,
}, },
expectedCol: logsV2Columns["scope_name"], expectedCol: []*schema.Column{logsV2Columns["scope_name"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -53,7 +54,7 @@ func TestGetColumn(t *testing.T) {
Name: "scope_name", Name: "scope_name",
FieldContext: telemetrytypes.FieldContextScope, FieldContext: telemetrytypes.FieldContextScope,
}, },
expectedCol: logsV2Columns["scope_name"], expectedCol: []*schema.Column{logsV2Columns["scope_name"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -62,7 +63,7 @@ func TestGetColumn(t *testing.T) {
Name: "version", Name: "version",
FieldContext: telemetrytypes.FieldContextScope, FieldContext: telemetrytypes.FieldContextScope,
}, },
expectedCol: logsV2Columns["scope_version"], expectedCol: []*schema.Column{logsV2Columns["scope_version"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -71,7 +72,7 @@ func TestGetColumn(t *testing.T) {
Name: "custom.scope.field", Name: "custom.scope.field",
FieldContext: telemetrytypes.FieldContextScope, FieldContext: telemetrytypes.FieldContextScope,
}, },
expectedCol: logsV2Columns["scope_string"], expectedCol: []*schema.Column{logsV2Columns["scope_string"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -81,7 +82,7 @@ func TestGetColumn(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
}, },
expectedCol: logsV2Columns["attributes_string"], expectedCol: []*schema.Column{logsV2Columns["attributes_string"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -91,7 +92,7 @@ func TestGetColumn(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeNumber, FieldDataType: telemetrytypes.FieldDataTypeNumber,
}, },
expectedCol: logsV2Columns["attributes_number"], expectedCol: []*schema.Column{logsV2Columns["attributes_number"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -101,7 +102,7 @@ func TestGetColumn(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeInt64, FieldDataType: telemetrytypes.FieldDataTypeInt64,
}, },
expectedCol: logsV2Columns["attributes_number"], expectedCol: []*schema.Column{logsV2Columns["attributes_number"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -111,7 +112,7 @@ func TestGetColumn(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeFloat64, FieldDataType: telemetrytypes.FieldDataTypeFloat64,
}, },
expectedCol: logsV2Columns["attributes_number"], expectedCol: []*schema.Column{logsV2Columns["attributes_number"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -121,7 +122,7 @@ func TestGetColumn(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeBool, FieldDataType: telemetrytypes.FieldDataTypeBool,
}, },
expectedCol: logsV2Columns["attributes_bool"], expectedCol: []*schema.Column{logsV2Columns["attributes_bool"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -130,7 +131,7 @@ func TestGetColumn(t *testing.T) {
Name: "timestamp", Name: "timestamp",
FieldContext: telemetrytypes.FieldContextLog, FieldContext: telemetrytypes.FieldContextLog,
}, },
expectedCol: logsV2Columns["timestamp"], expectedCol: []*schema.Column{logsV2Columns["timestamp"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -139,7 +140,7 @@ func TestGetColumn(t *testing.T) {
Name: "body", Name: "body",
FieldContext: telemetrytypes.FieldContextLog, FieldContext: telemetrytypes.FieldContextLog,
}, },
expectedCol: logsV2Columns["body"], expectedCol: []*schema.Column{logsV2Columns["body"]},
expectedError: nil, expectedError: nil,
}, },
{ {
@@ -159,7 +160,7 @@ func TestGetColumn(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeBool, FieldDataType: telemetrytypes.FieldDataTypeBool,
}, },
expectedCol: logsV2Columns["attributes_bool"], expectedCol: []*schema.Column{logsV2Columns["attributes_bool"]},
expectedError: nil, expectedError: nil,
}, },
} }
@@ -168,7 +169,7 @@ func TestGetColumn(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
col, err := fm.ColumnFor(ctx, &tc.key) col, err := fm.ColumnFor(ctx, 0, 0, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)
@@ -183,11 +184,14 @@ func TestGetColumn(t *testing.T) {
func TestGetFieldKeyName(t *testing.T) { func TestGetFieldKeyName(t *testing.T) {
ctx := context.Background() ctx := context.Background()
resourceEvolution := mockEvolutionData(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC))
testCases := []struct { testCases := []struct {
name string name string
key telemetrytypes.TelemetryFieldKey key telemetrytypes.TelemetryFieldKey
expectedResult string expectedResult string
expectedError error expectedError error
addExistsFilter bool
}{ }{
{ {
name: "Simple column type - timestamp", name: "Simple column type - timestamp",
@@ -195,8 +199,9 @@ func TestGetFieldKeyName(t *testing.T) {
Name: "timestamp", Name: "timestamp",
FieldContext: telemetrytypes.FieldContextLog, FieldContext: telemetrytypes.FieldContextLog,
}, },
expectedResult: "timestamp", expectedResult: "timestamp",
expectedError: nil, expectedError: nil,
addExistsFilter: false,
}, },
{ {
name: "Map column type - string attribute", name: "Map column type - string attribute",
@@ -205,8 +210,9 @@ func TestGetFieldKeyName(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
}, },
expectedResult: "attributes_string['user.id']", expectedResult: "attributes_string['user.id']",
expectedError: nil, expectedError: nil,
addExistsFilter: false,
}, },
{ {
name: "Map column type - number attribute", name: "Map column type - number attribute",
@@ -215,8 +221,9 @@ func TestGetFieldKeyName(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeNumber, FieldDataType: telemetrytypes.FieldDataTypeNumber,
}, },
expectedResult: "attributes_number['request.size']", expectedResult: "attributes_number['request.size']",
expectedError: nil, expectedError: nil,
addExistsFilter: false,
}, },
{ {
name: "Map column type - bool attribute", name: "Map column type - bool attribute",
@@ -225,28 +232,33 @@ func TestGetFieldKeyName(t *testing.T) {
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeBool, FieldDataType: telemetrytypes.FieldDataTypeBool,
}, },
expectedResult: "attributes_bool['request.success']", expectedResult: "attributes_bool['request.success']",
expectedError: nil, expectedError: nil,
addExistsFilter: false,
}, },
{ {
name: "Map column type - resource attribute", name: "Map column type - resource attribute",
key: telemetrytypes.TelemetryFieldKey{ key: telemetrytypes.TelemetryFieldKey{
Name: "service.name", Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
Evolutions: resourceEvolution,
}, },
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)", expectedResult: "resources_string['service.name']",
expectedError: nil, expectedError: nil,
addExistsFilter: false,
}, },
{ {
name: "Map column type - resource attribute - Materialized", name: "Map column type - resource attribute - Materialized - json",
key: telemetrytypes.TelemetryFieldKey{ key: telemetrytypes.TelemetryFieldKey{
Name: "service.name", Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
Materialized: true, Materialized: true,
Evolutions: resourceEvolution,
}, },
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, `resource_string_service$$name_exists`==true, `resource_string_service$$name`, NULL)", expectedResult: "`resource_string_service$$name`",
expectedError: nil, expectedError: nil,
addExistsFilter: false,
}, },
{ {
name: "Non-existent column", name: "Non-existent column",
@@ -262,7 +274,7 @@ func TestGetFieldKeyName(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
fm := NewFieldMapper() fm := NewFieldMapper()
result, err := fm.FieldFor(ctx, &tc.key) result, err := fm.FieldFor(ctx, 0, 0, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)
@@ -273,3 +285,571 @@ func TestGetFieldKeyName(t *testing.T) {
}) })
} }
} }
// TestFieldForWithEvolutions exercises fieldMapper.FieldFor when the telemetry
// field key carries schema-evolution entries (key.Evolutions). Each case places
// evolution ReleaseTime values at different positions relative to the queried
// [tsStart, tsEnd) window and asserts which column expression FieldFor emits:
// a single map access, a single JSON access, or a multiIf() fallback chain
// spanning both the new JSON column and the old map column.
func TestFieldForWithEvolutions(t *testing.T) {
	ctx := context.Background()

	// Shared resource-context key; each subtest attaches its own Evolutions
	// slice to this key before invoking FieldFor.
	key := &telemetrytypes.TelemetryFieldKey{
		Name: "service.name",
		FieldContext: telemetrytypes.FieldContextResource,
	}

	testCases := []struct {
		name string
		// evolutions is assigned to key.Evolutions for the subtest.
		evolutions []*telemetrytypes.EvolutionEntry
		key *telemetrytypes.TelemetryFieldKey
		// tsStartTime/tsEndTime bound the query window; they are converted
		// to uint64 nanosecond timestamps in the runner below.
		tsStartTime time.Time
		tsEndTime time.Time
		expectedResult string
		expectedError error
	}{
		{
			// Only the old map column was ever released, and before the
			// window starts — expect a plain map access.
			name: "Single evolution before tsStartTime",
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "resources_string['service.name']",
			expectedError: nil,
		},
		{
			// Release time coinciding with the window start still resolves
			// to the single map column.
			name: "Single evolution exactly at tsStartTime",
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "resources_string['service.name']",
			expectedError: nil,
		},
		{
			// The JSON `resource` column was released inside the window, so
			// both columns are live: expect a multiIf() that prefers the JSON
			// column and falls back to the map column.
			name: "Single evolution after tsStartTime",
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Unix(0, 0),
				},
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resource",
					ColumnType: "JSON()",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
			expectedError: nil,
		},
		// TODO(piyush): to be added once integration with JSON is done.
		// {
		// 	name: "Single evolution after tsStartTime - JSON body",
		// 	evolutions: []*telemetrytypes.EvolutionEntry{
		// 		{
		// 			Signal: telemetrytypes.SignalLogs,
		// 			ColumnName: LogsV2BodyJSONColumn,
		// 			ColumnType: "JSON(max_dynamic_paths=0)",
		// 			FieldContext: telemetrytypes.FieldContextBody,
		// 			FieldName: "__all__",
		// 			ReleaseTime: time.Unix(0, 0),
		// 		},
		// 		{
		// 			Signal: telemetrytypes.SignalLogs,
		// 			ColumnName: LogsV2BodyPromotedColumn,
		// 			ColumnType: "JSON()",
		// 			FieldContext: telemetrytypes.FieldContextBody,
		// 			FieldName: "user.name",
		// 			ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
		// 		},
		// 	},
		// 	key: &telemetrytypes.TelemetryFieldKey{
		// 		Name: "user.name",
		// 		FieldContext: telemetrytypes.FieldContextBody,
		// 		JSONDataType: &telemetrytypes.String,
		// 		Materialized: true,
		// 	},
		// 	tsStartTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
		// 	tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
		// 	expectedResult: "coalesce(dynamicElement(body_json.`user.name`, 'String'), dynamicElement(body_promoted.`user.name`, 'String'))",
		// 	expectedError: nil,
		// },
		{
			// Both evolutions predate the window; only the newest base
			// evolution (the JSON column) should be used.
			name: "Multiple evolutions before tsStartTime - only latest should be included",
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Unix(0, 0),
				},
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resource",
					ColumnType: "JSON()",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "resource.`service.name`::String",
			expectedError: nil,
		},
		{
			// Window starts at the epoch, so both evolutions fall inside it
			// and the combined multiIf() fallback is expected.
			name: "Multiple evolutions after tsStartTime - all should be included",
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Unix(0, 0),
				},
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resource",
					ColumnType: "JSON()",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Unix(0, 0),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
			expectedError: nil,
		},
		{
			// Two entries for the same "resource" column; the result should
			// collapse to a single JSON access rather than a fallback chain.
			name: "Duplicate evolutions after tsStartTime - all should be included",
			// Note: on production when this happens, we should go ahead and clean it up if required
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Unix(0, 0),
				},
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resource",
					ColumnType: "JSON()",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
				},
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resource",
					ColumnType: "JSON()",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "resource.`service.name`::String",
			expectedError: nil,
		},
		{
			// The end boundary is exclusive: an evolution released exactly at
			// tsEndTime must be ignored, leaving only the map column.
			name: "Evolution exactly at tsEndTime - should not be included",
			evolutions: []*telemetrytypes.EvolutionEntry{
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resources_string",
					ColumnType: "Map(LowCardinality(String), String)",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Unix(0, 0),
				},
				{
					Signal: telemetrytypes.SignalLogs,
					ColumnName: "resource",
					ColumnType: "JSON()",
					FieldContext: telemetrytypes.FieldContextResource,
					FieldName: "__all__",
					ReleaseTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
				},
			},
			key: key,
			tsStartTime: time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC),
			tsEndTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC),
			expectedResult: "resources_string['service.name']",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			fm := NewFieldMapper()
			// FieldFor takes the window as uint64 nanosecond timestamps.
			tsStart := uint64(tc.tsStartTime.UnixNano())
			tsEnd := uint64(tc.tsEndTime.UnixNano())
			// Attach this case's evolutions to the (shared) key before the call.
			tc.key.Evolutions = tc.evolutions
			result, err := fm.FieldFor(ctx, tsStart, tsEnd, tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}
func TestSelectEvolutionsForColumns(t *testing.T) {
testCases := []struct {
name string
columns []*schema.Column
evolutions []*telemetrytypes.EvolutionEntry
tsStart uint64
tsEnd uint64
expectedColumns []string // column names
expectedEvols []string // evolution column names
expectedError bool
errorStr string
}{
{
name: "New evolutions after tsStartTime - should include all",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource", "resources_string"}, // sorted by ReleaseTime desc
expectedEvols: []string{"resource", "resources_string"},
},
{
name: "Columns without matching evolutions - should exclude them",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"], // no evolution for this
logsV2Columns["attributes_string"], // no evolution for this
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "New evolutions after tsEndTime - should exclude all",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 2, 25, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"},
expectedEvols: []string{"resources_string"},
},
{
name: "Empty columns array",
columns: []*schema.Column{},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{},
expectedEvols: []string{},
expectedError: true,
errorStr: "column resources_string not found",
},
{
name: "Duplicate evolutions - should use first encountered (oldest if sorted)",
columns: []*schema.Column{
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 20, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resource"},
expectedEvols: []string{"resource"}, // should use first one (older)
},
{
name: "Genuine Duplicate evolutions with new version- should consider both",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 0,
ReleaseTime: time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 1,
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
Version: 2,
ReleaseTime: time.Date(2024, 1, 20, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 1, 16, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string", "resource"},
expectedEvols: []string{"resources_string", "resource"}, // should use first one (older)
},
{
name: "Evolution exactly at tsEndTime",
columns: []*schema.Column{
logsV2Columns["resources_string"],
logsV2Columns["resource"],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
ColumnType: "Map(LowCardinality(String), String)",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: time.Date(2024, 1, 15, 0, 0, 0, 0, time.UTC),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC), // exactly at tsEnd
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{"resources_string"}, // resource excluded because After(tsEnd) is true
expectedEvols: []string{"resources_string"},
},
{
name: "Single evolution after tsStartTime - JSON body",
columns: []*schema.Column{
logsV2Columns[LogsV2BodyJSONColumn],
logsV2Columns[LogsV2BodyPromotedColumn],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyJSONColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyPromotedColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 1, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{LogsV2BodyPromotedColumn, LogsV2BodyJSONColumn}, // sorted by ReleaseTime desc (newest first)
expectedEvols: []string{LogsV2BodyPromotedColumn, LogsV2BodyJSONColumn},
},
{
name: "No evolution after tsStartTime - JSON body",
columns: []*schema.Column{
logsV2Columns[LogsV2BodyJSONColumn],
logsV2Columns[LogsV2BodyPromotedColumn],
},
evolutions: []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyJSONColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: LogsV2BodyPromotedColumn,
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
ReleaseTime: time.Date(2024, 2, 2, 0, 0, 0, 0, time.UTC),
},
},
tsStart: uint64(time.Date(2024, 2, 3, 0, 0, 0, 0, time.UTC).UnixNano()),
tsEnd: uint64(time.Date(2024, 2, 15, 0, 0, 0, 0, time.UTC).UnixNano()),
expectedColumns: []string{LogsV2BodyPromotedColumn},
expectedEvols: []string{LogsV2BodyPromotedColumn},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
resultColumns, resultEvols, err := selectEvolutionsForColumns(tc.columns, tc.evolutions, tc.tsStart, tc.tsEnd)
if tc.expectedError {
assert.Contains(t, err.Error(), tc.errorStr)
} else {
require.NoError(t, err)
assert.Equal(t, len(tc.expectedColumns), len(resultColumns), "column count mismatch")
assert.Equal(t, len(tc.expectedEvols), len(resultEvols), "evolution count mismatch")
resultColumnNames := make([]string, len(resultColumns))
for i, col := range resultColumns {
resultColumnNames[i] = col.Name
}
resultEvolNames := make([]string, len(resultEvols))
for i, evol := range resultEvols {
resultEvolNames[i] = evol.ColumnName
}
for i := range tc.expectedColumns {
assert.Equal(t, resultColumnNames[i], tc.expectedColumns[i], "expected column missing: "+tc.expectedColumns[i])
}
for i := range tc.expectedEvols {
assert.Equal(t, resultEvolNames[i], tc.expectedEvols[i], "expected evolution missing: "+tc.expectedEvols[i])
}
// Verify sorting: should be descending by ReleaseTime
for i := 0; i < len(resultEvols)-1; i++ {
assert.True(t, !resultEvols[i].ReleaseTime.Before(resultEvols[i+1].ReleaseTime),
"evolutions should be sorted descending by ReleaseTime")
}
}
})
}
}

View File

@@ -1,6 +1,7 @@
package telemetrylogs package telemetrylogs
import ( import (
"context"
"testing" "testing"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest" "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
@@ -10,12 +11,14 @@ import (
// TestLikeAndILikeWithoutWildcards_Warns Tests that LIKE/ILIKE without wildcards add warnings and include docs URL // TestLikeAndILikeWithoutWildcards_Warns Tests that LIKE/ILIKE without wildcards add warnings and include docs URL
func TestLikeAndILikeWithoutWildcards_Warns(t *testing.T) { func TestLikeAndILikeWithoutWildcards_Warns(t *testing.T) {
ctx := context.Background()
fm := NewFieldMapper() fm := NewFieldMapper()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
keys := buildCompleteFieldKeyMap() keys := buildCompleteFieldKeyMap()
opts := querybuilder.FilterExprVisitorOpts{ opts := querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
@@ -33,7 +36,7 @@ func TestLikeAndILikeWithoutWildcards_Warns(t *testing.T) {
for _, expr := range tests { for _, expr := range tests {
t.Run(expr, func(t *testing.T) { t.Run(expr, func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(expr, opts, 0, 0) clause, err := querybuilder.PrepareWhereClause(expr, opts)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, clause) require.NotNil(t, clause)
@@ -52,6 +55,7 @@ func TestLikeAndILikeWithWildcards_NoWarn(t *testing.T) {
keys := buildCompleteFieldKeyMap() keys := buildCompleteFieldKeyMap()
opts := querybuilder.FilterExprVisitorOpts{ opts := querybuilder.FilterExprVisitorOpts{
Context: context.Background(),
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
@@ -69,7 +73,7 @@ func TestLikeAndILikeWithWildcards_NoWarn(t *testing.T) {
for _, expr := range tests { for _, expr := range tests {
t.Run(expr, func(t *testing.T) { t.Run(expr, func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(expr, opts, 0, 0) clause, err := querybuilder.PrepareWhereClause(expr, opts)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, clause) require.NotNil(t, clause)

View File

@@ -1,6 +1,7 @@
package telemetrylogs package telemetrylogs
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
@@ -19,6 +20,7 @@ func TestFilterExprLogsBodyJSON(t *testing.T) {
keys := buildCompleteFieldKeyMap() keys := buildCompleteFieldKeyMap()
opts := querybuilder.FilterExprVisitorOpts{ opts := querybuilder.FilterExprVisitorOpts{
Context: context.Background(),
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
@@ -161,7 +163,7 @@ func TestFilterExprLogsBodyJSON(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) { t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(tc.query, opts, 0, 0) clause, err := querybuilder.PrepareWhereClause(tc.query, opts)
if tc.shouldPass { if tc.shouldPass {
if err != nil { if err != nil {

View File

@@ -1,9 +1,11 @@
package telemetrylogs package telemetrylogs
import ( import (
"context"
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
"time"
"github.com/SigNoz/signoz/pkg/errors" "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest" "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
@@ -15,19 +17,33 @@ import (
// TestFilterExprLogs tests a comprehensive set of query patterns for logs search // TestFilterExprLogs tests a comprehensive set of query patterns for logs search
func TestFilterExprLogs(t *testing.T) { func TestFilterExprLogs(t *testing.T) {
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
ctx := context.Background()
fm := NewFieldMapper() fm := NewFieldMapper()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
// Define a comprehensive set of field keys to support all test cases // Define a comprehensive set of field keys to support all test cases
keys := buildCompleteFieldKeyMap() keys := buildCompleteFieldKeyMap()
// for each key of resource attribute add evolution metadata
for i, telemetryKeys := range keys {
for j, telemetryKey := range telemetryKeys {
if telemetryKey.FieldContext == telemetrytypes.FieldContextResource {
keys[i][j].Evolutions = mockEvolutionData(releaseTime)
}
}
}
opts := querybuilder.FilterExprVisitorOpts{ opts := querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
FieldKeys: keys, FieldKeys: keys,
FullTextColumn: DefaultFullTextColumn, FullTextColumn: DefaultFullTextColumn,
JsonKeyToKey: GetBodyJSONKey, JsonKeyToKey: GetBodyJSONKey,
StartNs: uint64(releaseTime.Add(-5 * time.Minute).UnixNano()),
EndNs: uint64(releaseTime.Add(5 * time.Minute).UnixNano()),
} }
testCases := []struct { testCases := []struct {
@@ -466,7 +482,7 @@ func TestFilterExprLogs(t *testing.T) {
expectedErrorContains: "", expectedErrorContains: "",
}, },
// fulltext with parenthesized expression //fulltext with parenthesized expression
{ {
category: "FREETEXT with parentheses", category: "FREETEXT with parentheses",
query: "error (status.code=500 OR status.code=503)", query: "error (status.code=500 OR status.code=503)",
@@ -2386,7 +2402,7 @@ func TestFilterExprLogs(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) { t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(tc.query, opts, 0, 0) clause, err := querybuilder.PrepareWhereClause(tc.query, opts)
if tc.shouldPass { if tc.shouldPass {
if err != nil { if err != nil {
@@ -2442,6 +2458,7 @@ func TestFilterExprLogsConflictNegation(t *testing.T) {
} }
opts := querybuilder.FilterExprVisitorOpts{ opts := querybuilder.FilterExprVisitorOpts{
Context: context.Background(),
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
@@ -2504,7 +2521,7 @@ func TestFilterExprLogsConflictNegation(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) { t.Run(fmt.Sprintf("%s: %s", tc.category, limitString(tc.query, 50)), func(t *testing.T) {
clause, err := querybuilder.PrepareWhereClause(tc.query, opts, 0, 0) clause, err := querybuilder.PrepareWhereClause(tc.query, opts)
if tc.shouldPass { if tc.shouldPass {
if err != nil { if err != nil {

View File

@@ -268,7 +268,7 @@ func (b *logQueryStatementBuilder) buildListQuery(
} }
// get column expression for the field - use array index directly to avoid pointer to loop variable // get column expression for the field - use array index directly to avoid pointer to loop variable
colExpr, err := b.fm.ColumnExpressionFor(ctx, &query.SelectFields[index], keys) colExpr, err := b.fm.ColumnExpressionFor(ctx, start, end, &query.SelectFields[index], keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -277,7 +277,6 @@ func (b *logQueryStatementBuilder) buildListQuery(
} }
sb.From(fmt.Sprintf("%s.%s", DBName, LogsV2TableName)) sb.From(fmt.Sprintf("%s.%s", DBName, LogsV2TableName))
// Add filter conditions // Add filter conditions
preparedWhereClause, err := b.addFilterCondition(ctx, sb, start, end, query, keys, variables) preparedWhereClause, err := b.addFilterCondition(ctx, sb, start, end, query, keys, variables)
@@ -287,7 +286,8 @@ func (b *logQueryStatementBuilder) buildListQuery(
// Add order by // Add order by
for _, orderBy := range query.Order { for _, orderBy := range query.Order {
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys)
colExpr, err := b.fm.ColumnExpressionFor(ctx, start, end, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -353,7 +353,7 @@ func (b *logQueryStatementBuilder) buildTimeSeriesQuery(
// Keep original column expressions so we can build the tuple // Keep original column expressions so we can build the tuple
fieldNames := make([]string, 0, len(query.GroupBy)) fieldNames := make([]string, 0, len(query.GroupBy))
for _, gb := range query.GroupBy { for _, gb := range query.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, b.jsonKeyToKey) expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, start, end, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, b.jsonKeyToKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -368,7 +368,7 @@ func (b *logQueryStatementBuilder) buildTimeSeriesQuery(
allAggChArgs := make([]any, 0) allAggChArgs := make([]any, 0)
for i, agg := range query.Aggregations { for i, agg := range query.Aggregations {
rewritten, chArgs, err := b.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.aggExprRewriter.Rewrite(
ctx, agg.Expression, ctx, start, end, agg.Expression,
uint64(query.StepInterval.Seconds()), uint64(query.StepInterval.Seconds()),
keys, keys,
) )
@@ -500,7 +500,7 @@ func (b *logQueryStatementBuilder) buildScalarQuery(
var allGroupByArgs []any var allGroupByArgs []any
for _, gb := range query.GroupBy { for _, gb := range query.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, b.jsonKeyToKey) expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, start, end, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, b.jsonKeyToKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -518,7 +518,7 @@ func (b *logQueryStatementBuilder) buildScalarQuery(
for idx := range query.Aggregations { for idx := range query.Aggregations {
aggExpr := query.Aggregations[idx] aggExpr := query.Aggregations[idx]
rewritten, chArgs, err := b.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.aggExprRewriter.Rewrite(
ctx, aggExpr.Expression, ctx, start, end, aggExpr.Expression,
rateInterval, rateInterval,
keys, keys,
) )
@@ -590,7 +590,7 @@ func (b *logQueryStatementBuilder) buildScalarQuery(
// buildFilterCondition builds SQL condition from filter expression // buildFilterCondition builds SQL condition from filter expression
func (b *logQueryStatementBuilder) addFilterCondition( func (b *logQueryStatementBuilder) addFilterCondition(
_ context.Context, ctx context.Context,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
start, end uint64, start, end uint64,
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation], query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation],
@@ -604,6 +604,7 @@ func (b *logQueryStatementBuilder) addFilterCondition(
if query.Filter != nil && query.Filter.Expression != "" { if query.Filter != nil && query.Filter.Expression != "" {
// add filter expression // add filter expression
preparedWhereClause, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ preparedWhereClause, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fm, FieldMapper: b.fm,
ConditionBuilder: b.cb, ConditionBuilder: b.cb,
@@ -612,7 +613,9 @@ func (b *logQueryStatementBuilder) addFilterCondition(
FullTextColumn: b.fullTextColumn, FullTextColumn: b.fullTextColumn,
JsonKeyToKey: b.jsonKeyToKey, JsonKeyToKey: b.jsonKeyToKey,
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -37,7 +37,14 @@ func resourceFilterStmtBuilder() qbtypes.StatementBuilder[qbtypes.LogAggregation
} }
func TestStatementBuilderTimeSeries(t *testing.T) { func TestStatementBuilderTimeSeries(t *testing.T) {
// Create a test release time
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
releaseTimeNano := uint64(releaseTime.UnixNano())
cases := []struct { cases := []struct {
startTs uint64
endTs uint64
name string name string
requestType qbtypes.RequestType requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation] query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
@@ -45,14 +52,16 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
expectedErr error expectedErr error
}{ }{
{ {
name: "Time series with limit", startTs: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
endTs: releaseTimeNano + uint64(48*time.Hour.Nanoseconds()),
name: "Time series with limit and count distinct on service.name",
requestType: qbtypes.RequestTypeTimeSeries, requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs, Signal: telemetrytypes.SignalLogs,
StepInterval: qbtypes.Step{Duration: 30 * time.Second}, StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.LogAggregation{ Aggregations: []qbtypes.LogAggregation{
{ {
Expression: "count()", Expression: "count_distinct(service.name)",
}, },
}, },
Filter: &qbtypes.Filter{ Filter: &qbtypes.Filter{
@@ -68,20 +77,22 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
}, },
}, },
expected: qbtypes.Statement{ expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) 
AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS `service.name`, countDistinct(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS `service.name`, countDistinct(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)}, Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1705397400), uint64(1705485600), "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600), 10, "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600)},
}, },
expectedErr: nil, expectedErr: nil,
}, },
{ {
name: "Time series with OR b/w resource attr and attribute filter", startTs: releaseTimeNano - uint64(24*time.Hour.Nanoseconds()),
endTs: releaseTimeNano + uint64(48*time.Hour.Nanoseconds()),
name: "Time series with OR b/w resource attr and attribute filter and count distinct on service.name",
requestType: qbtypes.RequestTypeTimeSeries, requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalTraces, Signal: telemetrytypes.SignalTraces,
StepInterval: qbtypes.Step{Duration: 30 * time.Second}, StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.LogAggregation{ Aggregations: []qbtypes.LogAggregation{
{ {
Expression: "count()", Expression: "count_distinct(service.name)",
}, },
}, },
Filter: &qbtypes.Filter{ Filter: &qbtypes.Filter{
@@ -97,12 +108,14 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
}, },
}, },
expected: qbtypes.Statement{ expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE ((simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) OR true) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) = ? AND multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL) OR (attributes_string['http.method'] = ? AND mapContains(attributes_string, 'http.method') = ?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) 
SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) = ? AND multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL) OR (attributes_string['http.method'] = ? AND mapContains(attributes_string, 'http.method') = ?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`", Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE ((simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) OR true) AND seen_at_ts_bucket_start >= ? 
AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, countDistinct(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) = ? AND multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL) OR (attributes_string['http.method'] = ? AND mapContains(attributes_string, 'http.method') = ?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY __result_0 DESC LIMIT ?) 
SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, countDistinct(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) = ? AND multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL) OR (attributes_string['http.method'] = ? AND mapContains(attributes_string, 'http.method') = ?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name`",
Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "redis-manual", "GET", true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10, "redis-manual", "GET", true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)}, Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1705224600), uint64(1705485600), "redis-manual", "GET", true, "1705226400000000000", uint64(1705224600), "1705485600000000000", uint64(1705485600), 10, "redis-manual", "GET", true, "1705226400000000000", uint64(1705224600), "1705485600000000000", uint64(1705485600)},
}, },
expectedErr: nil, expectedErr: nil,
}, },
{ {
startTs: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
endTs: releaseTimeNano + uint64(48*time.Hour.Nanoseconds()),
name: "Time series with limit + custom order by", name: "Time series with limit + custom order by",
requestType: qbtypes.RequestTypeTimeSeries, requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
@@ -136,12 +149,14 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
}, },
}, },
expected: qbtypes.Statement{ expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) IS NOT NULL, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL), NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc", Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? 
AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `service.name` ORDER BY `service.name` desc LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS `service.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`service.name`) GLOBAL IN (SELECT `service.name` FROM __limit_cte) GROUP BY ts, `service.name` ORDER BY `service.name` desc, ts desc",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)}, Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1705397400), uint64(1705485600), "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600), 10, "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600)},
}, },
expectedErr: nil, expectedErr: nil,
}, },
{ {
startTs: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
endTs: releaseTimeNano + uint64(48*time.Hour.Nanoseconds()),
name: "Time series with group by on materialized column", name: "Time series with group by on materialized column",
requestType: qbtypes.RequestTypeTimeSeries, requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
@@ -168,10 +183,12 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
}, },
expected: qbtypes.Statement{ expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(`attribute_string_materialized$$key$$name_exists` = ?, `attribute_string_materialized$$key$$name`, NULL)) AS `materialized.key.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY `materialized.key.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(`attribute_string_materialized$$key$$name_exists` = ?, `attribute_string_materialized$$key$$name`, NULL)) AS `materialized.key.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`materialized.key.name`) GLOBAL IN (SELECT `materialized.key.name` FROM __limit_cte) GROUP BY ts, `materialized.key.name`", Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), __limit_cte AS (SELECT toString(multiIf(`attribute_string_materialized$$key$$name_exists` = ?, `attribute_string_materialized$$key$$name`, NULL)) AS `materialized.key.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? 
AND ts_bucket_start <= ? GROUP BY `materialized.key.name` ORDER BY __result_0 DESC LIMIT ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(`attribute_string_materialized$$key$$name_exists` = ?, `attribute_string_materialized$$key$$name`, NULL)) AS `materialized.key.name`, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND true AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? AND (`materialized.key.name`) GLOBAL IN (SELECT `materialized.key.name` FROM __limit_cte) GROUP BY ts, `materialized.key.name`",
Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1747945619), uint64(1747983448), true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448), 10, true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)}, Args: []any{"cartservice", "%service.name%", "%service.name\":\"cartservice%", uint64(1705397400), uint64(1705485600), true, "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600), 10, true, "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600)},
}, },
}, },
{ {
startTs: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
endTs: releaseTimeNano + uint64(48*time.Hour.Nanoseconds()),
name: "Time series with materialised column using or with regex operator", name: "Time series with materialised column using or with regex operator",
requestType: qbtypes.RequestTypeTimeSeries, requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{ query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
@@ -189,14 +206,29 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
}, },
expected: qbtypes.Statement{ expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (true OR true) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((match(`attribute_string_materialized$$key$$name`, ?) AND `attribute_string_materialized$$key$$name_exists` = ?) OR (`attribute_string_materialized$$key$$name` = ? AND `attribute_string_materialized$$key$$name_exists` = ?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY ts", Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_logs.distributed_logs_v2_resource WHERE (true OR true) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, count() AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND ((match(`attribute_string_materialized$$key$$name`, ?) AND `attribute_string_materialized$$key$$name_exists` = ?) OR (`attribute_string_materialized$$key$$name` = ? AND `attribute_string_materialized$$key$$name_exists` = ?)) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY ts",
Args: []any{uint64(1747945619), uint64(1747983448), "redis.*", true, "memcached", true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)}, Args: []any{uint64(1705397400), uint64(1705485600), "redis.*", true, "memcached", true, "1705399200000000000", uint64(1705397400), "1705485600000000000", uint64(1705485600)},
}, },
expectedErr: nil, expectedErr: nil,
}, },
} }
ctx := context.Background()
mockMetadataStore := telemetrytypestest.NewMockMetadataStore() mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap() keysMap := buildCompleteFieldKeyMap()
// for each key of resource attribute add evolution metadata
for i, telemetryKeys := range keysMap {
for j, telemetryKey := range telemetryKeys {
if telemetryKey.FieldContext == telemetrytypes.FieldContextResource {
keysMap[i][j].Signal = telemetrytypes.SignalLogs
keysMap[i][j].Evolutions = mockEvolutionData(releaseTime)
}
}
}
mockMetadataStore.KeysMap = keysMap
fm := NewFieldMapper() fm := NewFieldMapper()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
@@ -218,7 +250,7 @@ func TestStatementBuilderTimeSeries(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil) q, err := statementBuilder.Build(ctx, c.startTs, c.endTs, c.requestType, c.query, nil)
if c.expectedErr != nil { if c.expectedErr != nil {
require.Error(t, err) require.Error(t, err)
@@ -315,9 +347,10 @@ func TestStatementBuilderListQuery(t *testing.T) {
}, },
} }
ctx := context.Background()
mockMetadataStore := telemetrytypestest.NewMockMetadataStore() mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
fm := NewFieldMapper() fm := NewFieldMapper()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil) aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil)
@@ -338,7 +371,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil) q, err := statementBuilder.Build(ctx, 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil { if c.expectedErr != nil {
require.Error(t, err) require.Error(t, err)
@@ -455,9 +488,10 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
}, },
} }
ctx := context.Background()
mockMetadataStore := telemetrytypestest.NewMockMetadataStore() mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
fm := NewFieldMapper() fm := NewFieldMapper()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil) aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil)
@@ -475,12 +509,10 @@ func TestStatementBuilderListQueryResourceTests(t *testing.T) {
GetBodyJSONKey, GetBodyJSONKey,
) )
//
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil) q, err := statementBuilder.Build(ctx, 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil { if c.expectedErr != nil {
require.Error(t, err) require.Error(t, err)
@@ -531,9 +563,10 @@ func TestStatementBuilderTimeSeriesBodyGroupBy(t *testing.T) {
}, },
} }
ctx := context.Background()
mockMetadataStore := telemetrytypestest.NewMockMetadataStore() mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
fm := NewFieldMapper() fm := NewFieldMapper()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMap()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil) aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil)
@@ -554,7 +587,7 @@ func TestStatementBuilderTimeSeriesBodyGroupBy(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil) q, err := statementBuilder.Build(ctx, 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErrContains != "" { if c.expectedErrContains != "" {
require.Error(t, err) require.Error(t, err)
@@ -626,9 +659,10 @@ func TestStatementBuilderListQueryServiceCollision(t *testing.T) {
}, },
} }
ctx := context.Background()
mockMetadataStore := telemetrytypestest.NewMockMetadataStore() mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMapCollision()
fm := NewFieldMapper() fm := NewFieldMapper()
mockMetadataStore.KeysMap = buildCompleteFieldKeyMapCollision()
cb := NewConditionBuilder(fm) cb := NewConditionBuilder(fm)
aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil) aggExprRewriter := querybuilder.NewAggExprRewriter(instrumentationtest.New().ToProviderSettings(), nil, fm, cb, nil)
@@ -649,7 +683,7 @@ func TestStatementBuilderListQueryServiceCollision(t *testing.T) {
for _, c := range cases { for _, c := range cases {
t.Run(c.name, func(t *testing.T) { t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil) q, err := statementBuilder.Build(ctx, 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErr != nil { if c.expectedErr != nil {
require.Error(t, err) require.Error(t, err)

View File

@@ -2,6 +2,7 @@ package telemetrylogs
import ( import (
"strings" "strings"
"time"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes" "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
) )
@@ -1007,3 +1008,24 @@ func buildCompleteFieldKeyMapCollision() map[string][]*telemetrytypes.TelemetryF
} }
return keysMap return keysMap
} }
func mockEvolutionData(releaseTime time.Time) []*telemetrytypes.EvolutionEntry {
return []*telemetrytypes.EvolutionEntry{
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resources_string",
FieldContext: telemetrytypes.FieldContextResource,
ColumnType: "Map(LowCardinality(String), String)",
FieldName: "__all__",
ReleaseTime: time.Unix(0, 0),
},
{
Signal: telemetrytypes.SignalLogs,
ColumnName: "resource",
ColumnType: "JSON()",
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
ReleaseTime: releaseTime,
},
}
}

View File

@@ -21,12 +21,11 @@ func NewConditionBuilder(fm qbtypes.FieldMapper) *conditionBuilder {
func (c *conditionBuilder) ConditionFor( func (c *conditionBuilder) ConditionFor(
ctx context.Context, ctx context.Context,
tsStart, tsEnd uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
_ uint64,
_ uint64,
) (string, error) { ) (string, error) {
switch operator { switch operator {
@@ -39,13 +38,13 @@ func (c *conditionBuilder) ConditionFor(
value = querybuilder.FormatValueForContains(value) value = querybuilder.FormatValueForContains(value)
} }
column, err := c.fm.ColumnFor(ctx, key) columns, err := c.fm.ColumnFor(ctx, tsStart, tsEnd, key)
if err != nil { if err != nil {
// if we don't have a column, we can't build a condition for related values // if we don't have a column, we can't build a condition for related values
return "", nil return "", nil
} }
tblFieldName, err := c.fm.FieldFor(ctx, key) tblFieldName, err := c.fm.FieldFor(ctx, tsStart, tsEnd, key)
if err != nil { if err != nil {
// if we don't have a table field name, we can't build a condition for related values // if we don't have a table field name, we can't build a condition for related values
return "", nil return "", nil
@@ -120,12 +119,12 @@ func (c *conditionBuilder) ConditionFor(
// in the query builder, `exists` and `not exists` are used for // in the query builder, `exists` and `not exists` are used for
// key membership checks, so depending on the column type, the condition changes // key membership checks, so depending on the column type, the condition changes
case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists: case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
switch column.Type { switch columns[0].Type {
case schema.MapColumnType{ case schema.MapColumnType{
KeyType: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}, KeyType: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
ValueType: schema.ColumnTypeString, ValueType: schema.ColumnTypeString,
}: }:
leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name) leftOperand := fmt.Sprintf("mapContains(%s, '%s')", columns[0].Name, key.Name)
if operator == qbtypes.FilterOperatorExists { if operator == qbtypes.FilterOperatorExists {
cond = sb.E(leftOperand, true) cond = sb.E(leftOperand, true)
} else { } else {
@@ -134,5 +133,5 @@ func (c *conditionBuilder) ConditionFor(
} }
} }
return fmt.Sprintf(expr, column.Name, sb.Var(key.Name), cond), nil return fmt.Sprintf(expr, columns[0].Name, sb.Var(key.Name), cond), nil
} }

View File

@@ -53,7 +53,7 @@ func TestConditionFor(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(ctx, &tc.key, tc.operator, tc.value, sb, 0, 0) cond, err := conditionBuilder.ConditionFor(ctx, 0, 0, &tc.key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if tc.expectedError != nil { if tc.expectedError != nil {

View File

@@ -33,47 +33,48 @@ func NewFieldMapper() qbtypes.FieldMapper {
return &fieldMapper{} return &fieldMapper{}
} }
func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) { func (m *fieldMapper) getColumn(_ context.Context, _, _ uint64, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
switch key.FieldContext { switch key.FieldContext {
case telemetrytypes.FieldContextResource: case telemetrytypes.FieldContextResource:
return attributeMetadataColumns["resource_attributes"], nil return []*schema.Column{attributeMetadataColumns["resource_attributes"]}, nil
case telemetrytypes.FieldContextAttribute: case telemetrytypes.FieldContextAttribute:
return attributeMetadataColumns["attributes"], nil return []*schema.Column{attributeMetadataColumns["attributes"]}, nil
} }
return nil, qbtypes.ErrColumnNotFound return nil, qbtypes.ErrColumnNotFound
} }
func (m *fieldMapper) ColumnFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) { func (m *fieldMapper) ColumnFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
column, err := m.getColumn(ctx, key) columns, err := m.getColumn(ctx, tsStart, tsEnd, key)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return column, nil return columns, nil
} }
func (m *fieldMapper) FieldFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) { func (m *fieldMapper) FieldFor(ctx context.Context, startNs, endNs uint64, key *telemetrytypes.TelemetryFieldKey) (string, error) {
column, err := m.getColumn(ctx, key) columns, err := m.getColumn(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
switch column.Type { switch columns[0].Type {
case schema.MapColumnType{ case schema.MapColumnType{
KeyType: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}, KeyType: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
ValueType: schema.ColumnTypeString, ValueType: schema.ColumnTypeString,
}: }:
return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil return fmt.Sprintf("%s['%s']", columns[0].Name, key.Name), nil
} }
return column.Name, nil return columns[0].Name, nil
} }
func (m *fieldMapper) ColumnExpressionFor( func (m *fieldMapper) ColumnExpressionFor(
ctx context.Context, ctx context.Context,
startNs, endNs uint64,
field *telemetrytypes.TelemetryFieldKey, field *telemetrytypes.TelemetryFieldKey,
keys map[string][]*telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
colName, err := m.FieldFor(ctx, field) colName, err := m.FieldFor(ctx, startNs, endNs, field)
if errors.Is(err, qbtypes.ErrColumnNotFound) { if errors.Is(err, qbtypes.ErrColumnNotFound) {
// the key didn't have the right context to be added to the query // the key didn't have the right context to be added to the query
// we try to use the context we know of // we try to use the context we know of
@@ -83,7 +84,7 @@ func (m *fieldMapper) ColumnExpressionFor(
if _, ok := attributeMetadataColumns[field.Name]; ok { if _, ok := attributeMetadataColumns[field.Name]; ok {
// if it is, attach the column name directly // if it is, attach the column name directly
field.FieldContext = telemetrytypes.FieldContextSpan field.FieldContext = telemetrytypes.FieldContextSpan
colName, _ = m.FieldFor(ctx, field) colName, _ = m.FieldFor(ctx, startNs, endNs, field)
} else { } else {
// - the context is not provided // - the context is not provided
// - there are not keys for the field // - there are not keys for the field
@@ -101,12 +102,12 @@ func (m *fieldMapper) ColumnExpressionFor(
} }
} else if len(keysForField) == 1 { } else if len(keysForField) == 1 {
// we have a single key for the field, use it // we have a single key for the field, use it
colName, _ = m.FieldFor(ctx, keysForField[0]) colName, _ = m.FieldFor(ctx, startNs, endNs, keysForField[0])
} else { } else {
// select any non-empty value from the keys // select any non-empty value from the keys
args := []string{} args := []string{}
for _, key := range keysForField { for _, key := range keysForField {
colName, _ = m.FieldFor(ctx, key) colName, _ = m.FieldFor(ctx, startNs, endNs, key)
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName)) args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
} }
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", ")) colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))

View File

@@ -128,13 +128,13 @@ func TestGetColumn(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
col, err := fm.ColumnFor(context.Background(), &tc.key) col, err := fm.ColumnFor(context.Background(), 0, 0, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)
} else { } else {
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tc.expectedCol, col) assert.Equal(t, tc.expectedCol, col[0])
} }
}) })
} }
@@ -145,6 +145,8 @@ func TestGetFieldKeyName(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
tsStart uint64
tsEnd uint64
key telemetrytypes.TelemetryFieldKey key telemetrytypes.TelemetryFieldKey
expectedResult string expectedResult string
expectedError error expectedError error
@@ -203,7 +205,7 @@ func TestGetFieldKeyName(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
result, err := fm.FieldFor(ctx, &tc.key) result, err := fm.FieldFor(ctx, tc.tsStart, tc.tsEnd, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)

View File

@@ -2,9 +2,11 @@ package telemetrymetadata
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"log/slog" "log/slog"
"strings" "strings"
"time"
"github.com/SigNoz/signoz/pkg/errors" "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory" "github.com/SigNoz/signoz/pkg/factory"
@@ -13,6 +15,7 @@ import (
"github.com/SigNoz/signoz/pkg/telemetrymetrics" "github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/telemetrystore" "github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrytraces" "github.com/SigNoz/signoz/pkg/telemetrytraces"
"github.com/SigNoz/signoz/pkg/types/cachetypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes" "github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes" "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -31,23 +34,24 @@ var (
) )
type telemetryMetaStore struct { type telemetryMetaStore struct {
logger *slog.Logger logger *slog.Logger
telemetrystore telemetrystore.TelemetryStore telemetrystore telemetrystore.TelemetryStore
tracesDBName string tracesDBName string
tracesFieldsTblName string tracesFieldsTblName string
spanAttributesKeysTblName string spanAttributesKeysTblName string
indexV3TblName string indexV3TblName string
metricsDBName string metricsDBName string
metricsFieldsTblName string metricsFieldsTblName string
meterDBName string meterDBName string
meterFieldsTblName string meterFieldsTblName string
logsDBName string logsDBName string
logsFieldsTblName string logsFieldsTblName string
logAttributeKeysTblName string logAttributeKeysTblName string
logResourceKeysTblName string logResourceKeysTblName string
logsV2TblName string logsV2TblName string
relatedMetadataDBName string relatedMetadataDBName string
relatedMetadataTblName string relatedMetadataTblName string
columnEvolutionMetadataTblName string
fm qbtypes.FieldMapper fm qbtypes.FieldMapper
conditionBuilder qbtypes.ConditionBuilder conditionBuilder qbtypes.ConditionBuilder
@@ -76,27 +80,29 @@ func NewTelemetryMetaStore(
logResourceKeysTblName string, logResourceKeysTblName string,
relatedMetadataDBName string, relatedMetadataDBName string,
relatedMetadataTblName string, relatedMetadataTblName string,
columnEvolutionMetadataTblName string,
) telemetrytypes.MetadataStore { ) telemetrytypes.MetadataStore {
metadataSettings := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/pkg/telemetrymetadata") metadataSettings := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/pkg/telemetrymetadata")
t := &telemetryMetaStore{ t := &telemetryMetaStore{
logger: metadataSettings.Logger(), logger: metadataSettings.Logger(),
telemetrystore: telemetrystore, telemetrystore: telemetrystore,
tracesDBName: tracesDBName, tracesDBName: tracesDBName,
tracesFieldsTblName: tracesFieldsTblName, tracesFieldsTblName: tracesFieldsTblName,
spanAttributesKeysTblName: spanAttributesKeysTblName, spanAttributesKeysTblName: spanAttributesKeysTblName,
indexV3TblName: indexV3TblName, indexV3TblName: indexV3TblName,
metricsDBName: metricsDBName, metricsDBName: metricsDBName,
metricsFieldsTblName: metricsFieldsTblName, metricsFieldsTblName: metricsFieldsTblName,
meterDBName: meterDBName, meterDBName: meterDBName,
meterFieldsTblName: meterFieldsTblName, meterFieldsTblName: meterFieldsTblName,
logsDBName: logsDBName, logsDBName: logsDBName,
logsV2TblName: logsV2TblName, logsV2TblName: logsV2TblName,
logsFieldsTblName: logsFieldsTblName, logsFieldsTblName: logsFieldsTblName,
logAttributeKeysTblName: logAttributeKeysTblName, logAttributeKeysTblName: logAttributeKeysTblName,
logResourceKeysTblName: logResourceKeysTblName, logResourceKeysTblName: logResourceKeysTblName,
relatedMetadataDBName: relatedMetadataDBName, relatedMetadataDBName: relatedMetadataDBName,
relatedMetadataTblName: relatedMetadataTblName, relatedMetadataTblName: relatedMetadataTblName,
columnEvolutionMetadataTblName: columnEvolutionMetadataTblName,
jsonColumnMetadata: map[telemetrytypes.Signal]map[telemetrytypes.FieldContext]telemetrytypes.JSONColumnMetadata{ jsonColumnMetadata: map[telemetrytypes.Signal]map[telemetrytypes.FieldContext]telemetrytypes.JSONColumnMetadata{
telemetrytypes.SignalLogs: { telemetrytypes.SignalLogs: {
telemetrytypes.FieldContextBody: telemetrytypes.JSONColumnMetadata{ telemetrytypes.FieldContextBody: telemetrytypes.JSONColumnMetadata{
@@ -563,9 +569,48 @@ func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors
keys = append(keys, bodyJSONPaths...) keys = append(keys, bodyJSONPaths...)
complete = complete && finished complete = complete && finished
} }
// fetch and add evolutions
evolutionMetadataKeySelectors := getEvolutionMetadataKeySelectors(keys)
evolutions, err := t.GetColumnEvolutionMetadataMulti(ctx, evolutionMetadataKeySelectors)
if err != nil {
return nil, false, err
}
for i, key := range keys {
// first check if there is evolutions that with field name as __all__
// then check for specific field name
selector := &telemetrytypes.EvolutionSelector{
Signal: key.Signal,
FieldContext: key.FieldContext,
FieldName: "__all__",
}
if keyEvolutions, ok := evolutions[telemetrytypes.GetEvolutionMetadataUniqueKey(selector)]; ok {
keys[i].Evolutions = keyEvolutions
}
selector.FieldName = key.Name
if keyEvolutions, ok := evolutions[telemetrytypes.GetEvolutionMetadataUniqueKey(selector)]; ok {
keys[i].Evolutions = keyEvolutions
}
}
return keys, complete, nil return keys, complete, nil
} }
func getEvolutionMetadataKeySelectors(keySelectors []*telemetrytypes.TelemetryFieldKey) []*telemetrytypes.EvolutionSelector {
var metadataKeySelectors []*telemetrytypes.EvolutionSelector
for _, keySelector := range keySelectors {
selector := &telemetrytypes.EvolutionSelector{
Signal: keySelector.Signal,
FieldContext: keySelector.FieldContext,
FieldName: keySelector.Name,
}
metadataKeySelectors = append(metadataKeySelectors, selector)
}
return metadataKeySelectors
}
func getPriorityForContext(ctx telemetrytypes.FieldContext) int { func getPriorityForContext(ctx telemetrytypes.FieldContext) int {
switch ctx { switch ctx {
case telemetrytypes.FieldContextLog: case telemetrytypes.FieldContextLog:
@@ -986,18 +1031,18 @@ func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSel
FieldDataType: fieldValueSelector.FieldDataType, FieldDataType: fieldValueSelector.FieldDataType,
} }
selectColumn, err := t.fm.FieldFor(ctx, key) selectColumn, err := t.fm.FieldFor(ctx, 0, 0, key)
if err != nil { if err != nil {
// we don't have a explicit column to select from the related metadata table // we don't have a explicit column to select from the related metadata table
// so we will select either from resource_attributes or attributes table // so we will select either from resource_attributes or attributes table
// in that order // in that order
resourceColumn, _ := t.fm.FieldFor(ctx, &telemetrytypes.TelemetryFieldKey{ resourceColumn, _ := t.fm.FieldFor(ctx, 0, 0, &telemetrytypes.TelemetryFieldKey{
Name: key.Name, Name: key.Name,
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
}) })
attributeColumn, _ := t.fm.FieldFor(ctx, &telemetrytypes.TelemetryFieldKey{ attributeColumn, _ := t.fm.FieldFor(ctx, 0, 0, &telemetrytypes.TelemetryFieldKey{
Name: key.Name, Name: key.Name,
FieldContext: telemetrytypes.FieldContextAttribute, FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString, FieldDataType: telemetrytypes.FieldDataTypeString,
@@ -1018,11 +1063,12 @@ func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSel
} }
whereClause, err := querybuilder.PrepareWhereClause(fieldValueSelector.ExistingQuery, querybuilder.FilterExprVisitorOpts{ whereClause, err := querybuilder.PrepareWhereClause(fieldValueSelector.ExistingQuery, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: t.logger, Logger: t.logger,
FieldMapper: t.fm, FieldMapper: t.fm,
ConditionBuilder: t.conditionBuilder, ConditionBuilder: t.conditionBuilder,
FieldKeys: keys, FieldKeys: keys,
}, 0, 0) })
if err == nil { if err == nil {
sb.AddWhereClause(whereClause.WhereClause) sb.AddWhereClause(whereClause.WhereClause)
} else { } else {
@@ -1046,20 +1092,20 @@ func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSel
// search on attributes // search on attributes
key.FieldContext = telemetrytypes.FieldContextAttribute key.FieldContext = telemetrytypes.FieldContextAttribute
cond, err := t.conditionBuilder.ConditionFor(ctx, key, qbtypes.FilterOperatorContains, fieldValueSelector.Value, sb, 0, 0) cond, err := t.conditionBuilder.ConditionFor(ctx, 0, 0, key, qbtypes.FilterOperatorContains, fieldValueSelector.Value, sb)
if err == nil { if err == nil {
conds = append(conds, cond) conds = append(conds, cond)
} }
// search on resource // search on resource
key.FieldContext = telemetrytypes.FieldContextResource key.FieldContext = telemetrytypes.FieldContextResource
cond, err = t.conditionBuilder.ConditionFor(ctx, key, qbtypes.FilterOperatorContains, fieldValueSelector.Value, sb, 0, 0) cond, err = t.conditionBuilder.ConditionFor(ctx, 0, 0, key, qbtypes.FilterOperatorContains, fieldValueSelector.Value, sb)
if err == nil { if err == nil {
conds = append(conds, cond) conds = append(conds, cond)
} }
key.FieldContext = origContext key.FieldContext = origContext
} else { } else {
cond, err := t.conditionBuilder.ConditionFor(ctx, key, qbtypes.FilterOperatorContains, fieldValueSelector.Value, sb, 0, 0) cond, err := t.conditionBuilder.ConditionFor(ctx, 0, 0, key, qbtypes.FilterOperatorContains, fieldValueSelector.Value, sb)
if err == nil { if err == nil {
conds = append(conds, cond) conds = append(conds, cond)
} }
@@ -1764,6 +1810,103 @@ func (t *telemetryMetaStore) fetchMeterSourceMetricsTemporality(ctx context.Cont
return result, nil return result, nil
} }
// CachedColumnEvolutionMetadata is a cacheable type for storing column evolution metadata
type CachedEvolutionEntry struct {
Metadata []*telemetrytypes.EvolutionEntry `json:"metadata"`
}
var _ cachetypes.Cacheable = (*CachedEvolutionEntry)(nil)
func (c *CachedEvolutionEntry) MarshalBinary() ([]byte, error) {
return json.Marshal(c)
}
func (c *CachedEvolutionEntry) UnmarshalBinary(data []byte) error {
return json.Unmarshal(data, c)
}
func (k *telemetryMetaStore) fetchEvolutionEntryFromClickHouse(ctx context.Context, selectors []*telemetrytypes.EvolutionSelector) ([]*telemetrytypes.EvolutionEntry, error) {
sb := sqlbuilder.NewSelectBuilder()
sb.Select("signal", "column_name", "column_type", "field_context", "field_name", "version", "release_time")
sb.From(fmt.Sprintf("%s.%s", k.relatedMetadataDBName, k.columnEvolutionMetadataTblName))
sb.OrderBy("release_time ASC")
var clauses []string
for _, selector := range selectors {
var clause string
if selector.FieldContext != telemetrytypes.FieldContextUnspecified {
clause = sb.E("field_context", selector.FieldContext)
}
if selector.FieldName != "" {
clause = sb.And(clause,
sb.Or(sb.E("field_name", selector.FieldName), sb.E("field_name", "__all__")),
)
} else {
clause = sb.And(clause, sb.E("field_name", "__all__"))
}
clauses = append(clauses, sb.And(sb.E("signal", selector.Signal), clause))
}
sb.Where(sb.Or(clauses...))
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
var entries []*telemetrytypes.EvolutionEntry
rows, err := k.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var entry telemetrytypes.EvolutionEntry
var releaseTimeNs float64
if err := rows.Scan(
&entry.Signal,
&entry.ColumnName,
&entry.ColumnType,
&entry.FieldContext,
&entry.FieldName,
&entry.Version,
&releaseTimeNs,
); err != nil {
return nil, err
}
// Convert nanoseconds to time.Time
releaseTime := time.Unix(0, int64(releaseTimeNs))
entry.ReleaseTime = releaseTime
entries = append(entries, &entry)
}
if err := rows.Err(); err != nil {
return nil, err
}
return entries, nil
}
// Get retrieves all evolutions for the given selectors from DB.
func (k *telemetryMetaStore) GetColumnEvolutionMetadataMulti(ctx context.Context, selectors []*telemetrytypes.EvolutionSelector) (map[string][]*telemetrytypes.EvolutionEntry, error) {
evolutions, err := k.fetchEvolutionEntryFromClickHouse(ctx, selectors)
if err != nil {
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to fetch evolution from clickhouse %s", err.Error())
}
evolutionsByUniqueKey := make(map[string][]*telemetrytypes.EvolutionEntry)
for _, evolution := range evolutions {
key := telemetrytypes.GetEvolutionMetadataUniqueKey(&telemetrytypes.EvolutionSelector{
Signal: evolution.Signal,
FieldContext: evolution.FieldContext,
FieldName: evolution.FieldName,
})
evolutionsByUniqueKey[key] = append(evolutionsByUniqueKey[key], evolution)
}
return evolutionsByUniqueKey, nil
}
// chunkSizeFirstSeenMetricMetadata limits the number of tuples per SQL query to avoid hitting the max_query_size limit. // chunkSizeFirstSeenMetricMetadata limits the number of tuples per SQL query to avoid hitting the max_query_size limit.
// //
// Calculation Logic: // Calculation Logic:

View File

@@ -39,6 +39,7 @@ func TestGetFirstSeenFromMetricMetadata(t *testing.T) {
telemetrylogs.LogResourceKeysTblName, telemetrylogs.LogResourceKeysTblName,
DBName, DBName,
AttributesMetadataLocalTableName, AttributesMetadataLocalTableName,
ColumnEvolutionMetadataTableName,
) )
lookupKeys := []telemetrytypes.MetricMetadataLookupKey{ lookupKeys := []telemetrytypes.MetricMetadataLookupKey{

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"regexp" "regexp"
"testing" "testing"
"time"
"github.com/SigNoz/signoz/pkg/errors" "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest" "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
@@ -38,6 +39,7 @@ func newTestTelemetryMetaStoreTestHelper(store telemetrystore.TelemetryStore) te
telemetrylogs.LogResourceKeysTblName, telemetrylogs.LogResourceKeysTblName,
DBName, DBName,
AttributesMetadataLocalTableName, AttributesMetadataLocalTableName,
ColumnEvolutionMetadataTableName,
) )
} }
@@ -384,3 +386,386 @@ func TestGetMetricFieldValuesIntrinsicBoolReturnsEmpty(t *testing.T) {
assert.Empty(t, values.BoolValues) assert.Empty(t, values.BoolValues)
require.NoError(t, mock.ExpectationsWereMet()) require.NoError(t, mock.ExpectationsWereMet())
} }
var (
clickHouseQueryPatternWithFieldName = "SELECT.*signal.*column_name.*column_type.*field_context.*field_name.*version.*release_time.*FROM.*distributed_column_evolution_metadata.*WHERE.*signal.*=.*field_context.*=.*field_name.*=.*field_name.*=.*"
clickHouseQueryPatternWithoutFieldName = "SELECT.*signal.*column_name.*column_type.*field_context.*field_name.*version.*release_time.*FROM.*distributed_column_evolution_metadata.*WHERE.*signal.*=.*field_context.*=.*ORDER BY.*release_time.*ASC"
clickHouseColumns = []cmock.ColumnType{
{Name: "signal", Type: "String"},
{Name: "column_name", Type: "String"},
{Name: "column_type", Type: "String"},
{Name: "field_context", Type: "String"},
{Name: "field_name", Type: "String"},
{Name: "version", Type: "UInt32"},
{Name: "release_time", Type: "Float64"},
}
)
func createMockRows(values [][]any) *cmock.Rows {
return cmock.NewRows(clickHouseColumns, values)
}
func TestKeyEvolutionMetadata_Get_Multi_FetchFromClickHouse(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
values := [][]any{
{
"logs",
"resources_string",
"Map(LowCardinality(String), String)",
"resource",
"__all__",
uint32(0),
float64(releaseTime.UnixNano()),
},
}
selector := &telemetrytypes.EvolutionSelector{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextResource,
}
rows := createMockRows(values)
mock.ExpectQuery(clickHouseQueryPatternWithoutFieldName).WithArgs(telemetrytypes.SignalLogs, telemetrytypes.FieldContextResource, "__all__").WillReturnRows(rows)
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
result, err := metadata.GetColumnEvolutionMetadataMulti(ctx, []*telemetrytypes.EvolutionSelector{selector})
require.NoError(t, err)
expectedKey := "logs:resource:__all__"
require.Contains(t, result, expectedKey)
require.Len(t, result[expectedKey], 1)
assert.Equal(t, telemetrytypes.SignalLogs, result[expectedKey][0].Signal)
assert.Equal(t, "resources_string", result[expectedKey][0].ColumnName)
assert.Equal(t, "Map(LowCardinality(String), String)", result[expectedKey][0].ColumnType)
assert.Equal(t, telemetrytypes.FieldContextResource, result[expectedKey][0].FieldContext)
assert.Equal(t, "__all__", result[expectedKey][0].FieldName)
assert.Equal(t, releaseTime.UnixNano(), result[expectedKey][0].ReleaseTime.UnixNano())
require.NoError(t, mock.ExpectationsWereMet())
}
func TestKeyEvolutionMetadata_Get_Multi_MultipleMetadataEntries(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
releaseTime1 := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
releaseTime2 := time.Date(2024, 2, 15, 10, 0, 0, 0, time.UTC)
values := [][]any{
{
"logs",
"resources_string",
"Map(LowCardinality(String), String)",
"resource",
"__all__",
uint32(0),
float64(releaseTime1.UnixNano()),
},
{
"logs",
"resource",
"JSON()",
"resource",
"__all__",
uint32(1),
float64(releaseTime2.UnixNano()),
},
}
rows := createMockRows(values)
mock.ExpectQuery(clickHouseQueryPatternWithoutFieldName).WithArgs(telemetrytypes.SignalLogs, telemetrytypes.FieldContextResource, "__all__").WillReturnRows(rows)
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
selector := &telemetrytypes.EvolutionSelector{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextResource,
}
result, err := metadata.GetColumnEvolutionMetadataMulti(ctx, []*telemetrytypes.EvolutionSelector{selector})
require.NoError(t, err)
expectedKey := "logs:resource:__all__"
require.Contains(t, result, expectedKey)
require.Len(t, result[expectedKey], 2)
assert.Equal(t, "resources_string", result[expectedKey][0].ColumnName)
assert.Equal(t, "Map(LowCardinality(String), String)", result[expectedKey][0].ColumnType)
assert.Equal(t, "resource", result[expectedKey][0].FieldContext.StringValue())
assert.Equal(t, "__all__", result[expectedKey][0].FieldName)
assert.Equal(t, releaseTime1.UnixNano(), result[expectedKey][0].ReleaseTime.UnixNano())
assert.Equal(t, "resource", result[expectedKey][1].ColumnName)
assert.Equal(t, "JSON()", result[expectedKey][1].ColumnType)
assert.Equal(t, "resource", result[expectedKey][1].FieldContext.StringValue())
assert.Equal(t, "__all__", result[expectedKey][1].FieldName)
assert.Equal(t, releaseTime2.UnixNano(), result[expectedKey][1].ReleaseTime.UnixNano())
require.NoError(t, mock.ExpectationsWereMet())
}
func TestKeyEvolutionMetadata_Get_Multi_MultipleMetadataEntriesWithFieldName(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
releaseTime1 := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
releaseTime2 := time.Date(2024, 2, 15, 10, 0, 0, 0, time.UTC)
releaseTime3 := time.Date(2024, 3, 15, 10, 0, 0, 0, time.UTC)
values := [][]any{
{
"logs",
"body",
"String",
"body",
"__all__",
uint32(0),
float64(releaseTime1.UnixNano()),
},
{
"logs",
"body_json",
"JSON()",
"body",
"__all__",
uint32(1),
float64(releaseTime2.UnixNano()),
},
{
"logs",
"body_promoted",
"JSON()",
"body",
"user.name",
uint32(2),
float64(releaseTime3.UnixNano()),
},
}
selector := &telemetrytypes.EvolutionSelector{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
}
rows := createMockRows(values)
mock.ExpectQuery(clickHouseQueryPatternWithFieldName).WithArgs(telemetrytypes.SignalLogs, telemetrytypes.FieldContextBody, selector.FieldName, "__all__").WillReturnRows(rows)
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
result, err := metadata.GetColumnEvolutionMetadataMulti(ctx, []*telemetrytypes.EvolutionSelector{selector})
require.NoError(t, err)
// Check entries for "__all__" field name
expectedKeyAll := "logs:body:__all__"
require.Contains(t, result, expectedKeyAll)
require.Len(t, result[expectedKeyAll], 2)
assert.Equal(t, "body", result[expectedKeyAll][0].ColumnName)
assert.Equal(t, "String", result[expectedKeyAll][0].ColumnType)
assert.Equal(t, "body", result[expectedKeyAll][0].FieldContext.StringValue())
assert.Equal(t, "__all__", result[expectedKeyAll][0].FieldName)
assert.Equal(t, releaseTime1.UnixNano(), result[expectedKeyAll][0].ReleaseTime.UnixNano())
assert.Equal(t, "body_json", result[expectedKeyAll][1].ColumnName)
assert.Equal(t, "JSON()", result[expectedKeyAll][1].ColumnType)
assert.Equal(t, "body", result[expectedKeyAll][1].FieldContext.StringValue())
assert.Equal(t, "__all__", result[expectedKeyAll][1].FieldName)
assert.Equal(t, releaseTime2.UnixNano(), result[expectedKeyAll][1].ReleaseTime.UnixNano())
// Check entries for "user.name" field name
expectedKeyUser := "logs:body:user.name"
require.Contains(t, result, expectedKeyUser)
require.Len(t, result[expectedKeyUser], 1)
assert.Equal(t, "body_promoted", result[expectedKeyUser][0].ColumnName)
assert.Equal(t, "JSON()", result[expectedKeyUser][0].ColumnType)
assert.Equal(t, "body", result[expectedKeyUser][0].FieldContext.StringValue())
assert.Equal(t, "user.name", result[expectedKeyUser][0].FieldName)
assert.Equal(t, releaseTime3.UnixNano(), result[expectedKeyUser][0].ReleaseTime.UnixNano())
require.NoError(t, mock.ExpectationsWereMet())
}
func TestKeyEvolutionMetadata_Get_Multi_MultipleMetadataEntriesWithMultipleSelectors(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
// releaseTime1 := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
releaseTime2 := time.Date(2024, 2, 15, 10, 0, 0, 0, time.UTC)
releaseTime3 := time.Date(2024, 3, 15, 10, 0, 0, 0, time.UTC)
values := [][]any{
{
"logs",
"body_json",
"JSON()",
"body",
"__all__",
uint32(0),
float64(releaseTime2.UnixNano()),
},
{
"logs",
"body_promoted",
"JSON()",
"body",
"user.name",
uint32(1),
float64(releaseTime3.UnixNano()),
},
{
"traces",
"resources_string",
"map()",
telemetrytypes.FieldContextResource,
"__all__",
uint32(0),
float64(releaseTime2.UnixNano()),
},
{
telemetrytypes.SignalTraces,
"resource",
"JSON()",
telemetrytypes.FieldContextResource,
"__all__",
uint32(1),
float64(releaseTime3.UnixNano()),
},
}
selectors := []*telemetrytypes.EvolutionSelector{
{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "user.name",
},
{
Signal: telemetrytypes.SignalTraces,
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "service.name",
},
}
query := `SELECT signal, column_name, column_type, field_context, field_name, version, release_time FROM signoz_metadata\.distributed_column_evolution_metadata WHERE ` +
`\(\(signal = \? AND \(field_context = \? AND \(field_name = \? OR field_name = \?\)\)\) OR ` +
`\(signal = \? AND \(field_context = \? AND \(field_name = \? OR field_name = \?\)\)\)\) ` +
`ORDER BY release_time ASC`
rows := createMockRows(values)
mock.ExpectQuery(query).WithArgs(
telemetrytypes.SignalLogs, telemetrytypes.FieldContextBody, selectors[0].FieldName, "__all__",
telemetrytypes.SignalTraces, telemetrytypes.FieldContextResource, selectors[1].FieldName, "__all__",
).WillReturnRows(rows)
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
_, err := metadata.GetColumnEvolutionMetadataMulti(ctx, selectors)
require.NoError(t, err)
require.NoError(t, mock.ExpectationsWereMet())
}
func TestKeyEvolutionMetadata_Get_Multi_EmptyResultFromClickHouse(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
rows := createMockRows([][]any{})
mock.ExpectQuery(clickHouseQueryPatternWithoutFieldName).WithArgs(telemetrytypes.SignalLogs, telemetrytypes.FieldContextResource, "__all__").WillReturnRows(rows)
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
selector := &telemetrytypes.EvolutionSelector{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextResource,
}
result, err := metadata.GetColumnEvolutionMetadataMulti(ctx, []*telemetrytypes.EvolutionSelector{selector})
require.NoError(t, err)
assert.Empty(t, result)
require.NoError(t, mock.ExpectationsWereMet())
}
func TestKeyEvolutionMetadata_Get_Multi_ClickHouseQueryError(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
mock.ExpectQuery(clickHouseQueryPatternWithoutFieldName).WithArgs(telemetrytypes.SignalLogs, telemetrytypes.FieldContextResource, "__all__").WillReturnError(assert.AnError)
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
selector := &telemetrytypes.EvolutionSelector{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextResource,
}
_, err := metadata.GetColumnEvolutionMetadataMulti(ctx, []*telemetrytypes.EvolutionSelector{selector})
require.Error(t, err)
}
func TestKeyEvolutionMetadata_Get_Multi_MultipleSelectors(t *testing.T) {
ctx := context.Background()
telemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
mock := telemetryStore.Mock()
releaseTime1 := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
releaseTime2 := time.Date(2024, 2, 15, 10, 0, 0, 0, time.UTC)
values := [][]any{
{
telemetrytypes.SignalLogs,
"resources_string",
"Map(LowCardinality(String), String)",
telemetrytypes.FieldContextResource,
"__all__",
uint32(0),
float64(releaseTime1.UnixNano()),
},
{
telemetrytypes.SignalLogs,
"body",
"JSON()",
telemetrytypes.FieldContextBody,
"__all__",
uint32(1),
float64(releaseTime2.UnixNano()),
},
}
// When multiple selectors are provided, the query will have OR conditions
// The pattern should match queries with multiple OR clauses
queryPattern := "SELECT.*signal.*column_name.*column_type.*field_context.*field_name.*release_time.*FROM.*distributed_column_evolution_metadata.*WHERE.*ORDER BY.*release_time.*ASC"
rows := createMockRows(values)
mock.ExpectQuery(queryPattern).WillReturnRows(rows).WithArgs(telemetrytypes.SignalLogs, telemetrytypes.FieldContextResource, "__all__", "__all__", telemetrytypes.SignalLogs, telemetrytypes.FieldContextBody, "__all__", "__all__")
metadata := newTestTelemetryMetaStoreTestHelper(telemetryStore)
selectors := []*telemetrytypes.EvolutionSelector{
{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextResource,
FieldName: "__all__",
},
{
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextBody,
FieldName: "__all__",
},
}
result, err := metadata.GetColumnEvolutionMetadataMulti(ctx, selectors)
require.NoError(t, err)
// Should have entries for both selectors
expectedKey1 := "logs:resource:__all__"
expectedKey2 := "logs:body:__all__"
require.Contains(t, result, expectedKey1)
require.Contains(t, result, expectedKey2)
require.Len(t, result[expectedKey1], 1)
require.Len(t, result[expectedKey2], 1)
assert.Equal(t, "resources_string", result[expectedKey1][0].ColumnName)
assert.Equal(t, "body", result[expectedKey2][0].ColumnName)
require.NoError(t, mock.ExpectationsWereMet())
}

View File

@@ -6,6 +6,7 @@ const (
DBName = "signoz_metadata" DBName = "signoz_metadata"
AttributesMetadataTableName = "distributed_attributes_metadata" AttributesMetadataTableName = "distributed_attributes_metadata"
AttributesMetadataLocalTableName = "attributes_metadata" AttributesMetadataLocalTableName = "attributes_metadata"
ColumnEvolutionMetadataTableName = "distributed_column_evolution_metadata"
PathTypesTableName = otelcollectorconst.DistributedPathTypesTable PathTypesTableName = otelcollectorconst.DistributedPathTypesTable
PromotedPathsTableName = otelcollectorconst.DistributedPromotedPathsTable PromotedPathsTableName = otelcollectorconst.DistributedPromotedPathsTable
SkipIndexTableName = "system.data_skipping_indices" SkipIndexTableName = "system.data_skipping_indices"

View File

@@ -122,7 +122,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
stepSec, stepSec,
)) ))
for _, g := range query.GroupBy { for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys) col, err := b.fm.ColumnExpressionFor(ctx, start, end, &g.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
@@ -147,13 +147,16 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
) )
if query.Filter != nil && query.Filter.Expression != "" { if query.Filter != nil && query.Filter.Expression != "" {
filterWhere, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ filterWhere, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fm, FieldMapper: b.fm,
ConditionBuilder: b.cb, ConditionBuilder: b.cb,
FieldKeys: keys, FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"}, FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
@@ -205,7 +208,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
)) ))
for _, g := range query.GroupBy { for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys) col, err := b.fm.ColumnExpressionFor(ctx, start, end, &g.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
@@ -233,13 +236,16 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
if query.Filter != nil && query.Filter.Expression != "" { if query.Filter != nil && query.Filter.Expression != "" {
filterWhere, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ filterWhere, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fm, FieldMapper: b.fm,
ConditionBuilder: b.cb, ConditionBuilder: b.cb,
FieldKeys: keys, FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"}, FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
@@ -278,7 +284,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
stepSec, stepSec,
)) ))
for _, g := range query.GroupBy { for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys) col, err := b.fm.ColumnExpressionFor(ctx, start, end, &g.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
@@ -300,13 +306,16 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
) )
if query.Filter != nil && query.Filter.Expression != "" { if query.Filter != nil && query.Filter.Expression != "" {
filterWhere, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ filterWhere, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fm, FieldMapper: b.fm,
ConditionBuilder: b.cb, ConditionBuilder: b.cb,
FieldKeys: keys, FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"}, FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }

View File

@@ -23,6 +23,8 @@ func NewConditionBuilder(fm qbtypes.FieldMapper) *conditionBuilder {
func (c *conditionBuilder) conditionFor( func (c *conditionBuilder) conditionFor(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
@@ -33,7 +35,7 @@ func (c *conditionBuilder) conditionFor(
value = querybuilder.FormatValueForContains(value) value = querybuilder.FormatValueForContains(value)
} }
tblFieldName, err := c.fm.FieldFor(ctx, key) tblFieldName, err := c.fm.FieldFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -141,14 +143,14 @@ func (c *conditionBuilder) conditionFor(
func (c *conditionBuilder) ConditionFor( func (c *conditionBuilder) ConditionFor(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
_ uint64,
_ uint64,
) (string, error) { ) (string, error) {
condition, err := c.conditionFor(ctx, key, operator, value, sb) condition, err := c.conditionFor(ctx, startNs, endNs, key, operator, value, sb)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -234,7 +234,7 @@ func TestConditionFor(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(ctx, &tc.key, tc.operator, tc.value, sb, 0, 0) cond, err := conditionBuilder.ConditionFor(ctx, 0, 0, &tc.key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if tc.expectedError != nil { if tc.expectedError != nil {
@@ -289,7 +289,7 @@ func TestConditionForMultipleKeys(t *testing.T) {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
var err error var err error
for _, key := range tc.keys { for _, key := range tc.keys {
cond, err := conditionBuilder.ConditionFor(ctx, &key, tc.operator, tc.value, sb, 0, 0) cond, err := conditionBuilder.ConditionFor(ctx, 0, 0, &key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if err != nil { if err != nil {
t.Fatalf("Error getting condition for key %s: %v", key.Name, err) t.Fatalf("Error getting condition for key %s: %v", key.Name, err)

View File

@@ -41,62 +41,63 @@ func NewFieldMapper() qbtypes.FieldMapper {
return &fieldMapper{} return &fieldMapper{}
} }
func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) { func (m *fieldMapper) getColumn(_ context.Context, _, _ uint64, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
switch key.FieldContext { switch key.FieldContext {
case telemetrytypes.FieldContextResource, telemetrytypes.FieldContextScope, telemetrytypes.FieldContextAttribute: case telemetrytypes.FieldContextResource, telemetrytypes.FieldContextScope, telemetrytypes.FieldContextAttribute:
return timeSeriesV4Columns["labels"], nil return []*schema.Column{timeSeriesV4Columns["labels"]}, nil
case telemetrytypes.FieldContextMetric: case telemetrytypes.FieldContextMetric:
col, ok := timeSeriesV4Columns[key.Name] col, ok := timeSeriesV4Columns[key.Name]
if !ok { if !ok {
return nil, qbtypes.ErrColumnNotFound return []*schema.Column{}, qbtypes.ErrColumnNotFound
} }
return col, nil return []*schema.Column{col}, nil
case telemetrytypes.FieldContextUnspecified: case telemetrytypes.FieldContextUnspecified:
col, ok := timeSeriesV4Columns[key.Name] col, ok := timeSeriesV4Columns[key.Name]
if !ok { if !ok {
// if nothing is found, return labels column // if nothing is found, return labels column
// as we keep all the labels in the labels column // as we keep all the labels in the labels column
return timeSeriesV4Columns["labels"], nil return []*schema.Column{timeSeriesV4Columns["labels"]}, nil
} }
return col, nil return []*schema.Column{col}, nil
} }
return nil, qbtypes.ErrColumnNotFound return nil, qbtypes.ErrColumnNotFound
} }
func (m *fieldMapper) FieldFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) { func (m *fieldMapper) FieldFor(ctx context.Context, startNs, endNs uint64, key *telemetrytypes.TelemetryFieldKey) (string, error) {
column, err := m.getColumn(ctx, key) columns, err := m.getColumn(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
switch key.FieldContext { switch key.FieldContext {
case telemetrytypes.FieldContextResource, telemetrytypes.FieldContextScope, telemetrytypes.FieldContextAttribute: case telemetrytypes.FieldContextResource, telemetrytypes.FieldContextScope, telemetrytypes.FieldContextAttribute:
return fmt.Sprintf("JSONExtractString(%s, '%s')", column.Name, key.Name), nil return fmt.Sprintf("JSONExtractString(%s, '%s')", columns[0].Name, key.Name), nil
case telemetrytypes.FieldContextMetric: case telemetrytypes.FieldContextMetric:
return column.Name, nil return columns[0].Name, nil
case telemetrytypes.FieldContextUnspecified: case telemetrytypes.FieldContextUnspecified:
if slices.Contains(IntrinsicFields, key.Name) { if slices.Contains(IntrinsicFields, key.Name) {
return column.Name, nil return columns[0].Name, nil
} }
return fmt.Sprintf("JSONExtractString(%s, '%s')", column.Name, key.Name), nil return fmt.Sprintf("JSONExtractString(%s, '%s')", columns[0].Name, key.Name), nil
} }
return column.Name, nil return columns[0].Name, nil
} }
func (m *fieldMapper) ColumnFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) { func (m *fieldMapper) ColumnFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error) {
return m.getColumn(ctx, key) return m.getColumn(ctx, tsStart, tsEnd, key)
} }
func (m *fieldMapper) ColumnExpressionFor( func (m *fieldMapper) ColumnExpressionFor(
ctx context.Context, ctx context.Context,
startNs, endNs uint64,
field *telemetrytypes.TelemetryFieldKey, field *telemetrytypes.TelemetryFieldKey,
keys map[string][]*telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
colName, err := m.FieldFor(ctx, field) colName, err := m.FieldFor(ctx, startNs, endNs, field)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -123,13 +123,13 @@ func TestGetColumn(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
col, err := fm.ColumnFor(ctx, &tc.key) col, err := fm.ColumnFor(ctx, 0, 0, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)
} else { } else {
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, tc.expectedCol, col) assert.Equal(t, tc.expectedCol, col[0])
} }
}) })
} }
@@ -207,7 +207,7 @@ func TestGetFieldKeyName(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
result, err := fm.FieldFor(ctx, &tc.key) result, err := fm.FieldFor(ctx, 0, 0, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)

View File

@@ -269,13 +269,16 @@ func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
if query.Filter != nil && query.Filter.Expression != "" { if query.Filter != nil && query.Filter.Expression != "" {
preparedWhereClause, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ preparedWhereClause, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fm, FieldMapper: b.fm,
ConditionBuilder: b.cb, ConditionBuilder: b.cb,
FieldKeys: keys, FieldKeys: keys,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"}, FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "labels"},
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }
@@ -286,7 +289,7 @@ func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
sb.Select("fingerprint") sb.Select("fingerprint")
for _, g := range query.GroupBy { for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys) col, err := b.fm.ColumnExpressionFor(ctx, start, end, &g.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return "", nil, err return "", nil, err
} }

View File

@@ -29,6 +29,8 @@ func NewConditionBuilder(fm qbtypes.FieldMapper) *conditionBuilder {
func (c *conditionBuilder) conditionFor( func (c *conditionBuilder) conditionFor(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
@@ -40,13 +42,13 @@ func (c *conditionBuilder) conditionFor(
} }
// first, locate the raw column type (so we can choose the right EXISTS logic) // first, locate the raw column type (so we can choose the right EXISTS logic)
column, err := c.fm.ColumnFor(ctx, key) columns, err := c.fm.ColumnFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
// then ask the mapper for the actual SQL reference // then ask the mapper for the actual SQL reference
tblFieldName, err := c.fm.FieldFor(ctx, key) tblFieldName, err := c.fm.FieldFor(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -159,7 +161,7 @@ func (c *conditionBuilder) conditionFor(
case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists: case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
var value any var value any
switch column.Type.GetType() { switch columns[0].Type.GetType() {
case schema.ColumnTypeEnumJSON: case schema.ColumnTypeEnumJSON:
if operator == qbtypes.FilterOperatorExists { if operator == qbtypes.FilterOperatorExists {
return sb.IsNotNull(tblFieldName), nil return sb.IsNotNull(tblFieldName), nil
@@ -176,7 +178,7 @@ func (c *conditionBuilder) conditionFor(
return sb.E(tblFieldName, value), nil return sb.E(tblFieldName, value), nil
} }
case schema.ColumnTypeEnumLowCardinality: case schema.ColumnTypeEnumLowCardinality:
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() { switch elementType := columns[0].Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
case schema.ColumnTypeEnumString: case schema.ColumnTypeEnumString:
value = "" value = ""
if operator == qbtypes.FilterOperatorExists { if operator == qbtypes.FilterOperatorExists {
@@ -200,14 +202,14 @@ func (c *conditionBuilder) conditionFor(
return sb.E(tblFieldName, value), nil return sb.E(tblFieldName, value), nil
} }
case schema.ColumnTypeEnumMap: case schema.ColumnTypeEnumMap:
keyType := column.Type.(schema.MapColumnType).KeyType keyType := columns[0].Type.(schema.MapColumnType).KeyType
if _, ok := keyType.(schema.LowCardinalityColumnType); !ok { if _, ok := keyType.(schema.LowCardinalityColumnType); !ok {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, column.Type) return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "key type %s is not supported for map column type %s", keyType, columns[0].Type)
} }
switch valueType := column.Type.(schema.MapColumnType).ValueType; valueType.GetType() { switch valueType := columns[0].Type.(schema.MapColumnType).ValueType; valueType.GetType() {
case schema.ColumnTypeEnumString, schema.ColumnTypeEnumBool, schema.ColumnTypeEnumFloat64: case schema.ColumnTypeEnumString, schema.ColumnTypeEnumBool, schema.ColumnTypeEnumFloat64:
leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name) leftOperand := fmt.Sprintf("mapContains(%s, '%s')", columns[0].Name, key.Name)
if key.Materialized { if key.Materialized {
leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key) leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
} }
@@ -220,7 +222,7 @@ func (c *conditionBuilder) conditionFor(
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for map column type %s", valueType) return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for map column type %s", valueType)
} }
default: default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", column.Type) return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", columns[0].Type)
} }
} }
return "", nil return "", nil
@@ -228,25 +230,25 @@ func (c *conditionBuilder) conditionFor(
func (c *conditionBuilder) ConditionFor( func (c *conditionBuilder) ConditionFor(
ctx context.Context, ctx context.Context,
startNs uint64,
endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
operator qbtypes.FilterOperator, operator qbtypes.FilterOperator,
value any, value any,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
startNs uint64,
_ uint64,
) (string, error) { ) (string, error) {
if c.isSpanScopeField(key.Name) { if c.isSpanScopeField(key.Name) {
return c.buildSpanScopeCondition(key, operator, value, startNs) return c.buildSpanScopeCondition(key, operator, value, startNs)
} }
condition, err := c.conditionFor(ctx, key, operator, value, sb) condition, err := c.conditionFor(ctx, startNs, endNs, key, operator, value, sb)
if err != nil { if err != nil {
return "", err return "", err
} }
if operator.AddDefaultExistsFilter() { if operator.AddDefaultExistsFilter() {
// skip adding exists filter for intrinsic fields // skip adding exists filter for intrinsic fields
field, _ := c.fm.FieldFor(ctx, key) field, _ := c.fm.FieldFor(ctx, startNs, endNs, key)
if slices.Contains(maps.Keys(IntrinsicFields), field) || if slices.Contains(maps.Keys(IntrinsicFields), field) ||
slices.Contains(maps.Keys(IntrinsicFieldsDeprecated), field) || slices.Contains(maps.Keys(IntrinsicFieldsDeprecated), field) ||
slices.Contains(maps.Keys(CalculatedFields), field) || slices.Contains(maps.Keys(CalculatedFields), field) ||
@@ -254,7 +256,7 @@ func (c *conditionBuilder) ConditionFor(
return condition, nil return condition, nil
} }
existsCondition, err := c.conditionFor(ctx, key, qbtypes.FilterOperatorExists, nil, sb) existsCondition, err := c.conditionFor(ctx, startNs, endNs, key, qbtypes.FilterOperatorExists, nil, sb)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -289,7 +289,7 @@ func TestConditionFor(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
sb := sqlbuilder.NewSelectBuilder() sb := sqlbuilder.NewSelectBuilder()
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
cond, err := conditionBuilder.ConditionFor(ctx, &tc.key, tc.operator, tc.value, sb, 1761437108000000000, 1761458708000000000) cond, err := conditionBuilder.ConditionFor(ctx, 1761437108000000000, 1761458708000000000, &tc.key, tc.operator, tc.value, sb)
sb.Where(cond) sb.Where(cond)
if tc.expectedError != nil { if tc.expectedError != nil {

View File

@@ -169,23 +169,24 @@ func NewFieldMapper() *defaultFieldMapper {
func (m *defaultFieldMapper) getColumn( func (m *defaultFieldMapper) getColumn(
_ context.Context, _ context.Context,
_, _ uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
) (*schema.Column, error) { ) ([]*schema.Column, error) {
switch key.FieldContext { switch key.FieldContext {
case telemetrytypes.FieldContextResource: case telemetrytypes.FieldContextResource:
return indexV3Columns["resource"], nil return []*schema.Column{indexV3Columns["resource"]}, nil
case telemetrytypes.FieldContextScope: case telemetrytypes.FieldContextScope:
return nil, qbtypes.ErrColumnNotFound return []*schema.Column{}, qbtypes.ErrColumnNotFound
case telemetrytypes.FieldContextAttribute: case telemetrytypes.FieldContextAttribute:
switch key.FieldDataType { switch key.FieldDataType {
case telemetrytypes.FieldDataTypeString: case telemetrytypes.FieldDataTypeString:
return indexV3Columns["attributes_string"], nil return []*schema.Column{indexV3Columns["attributes_string"]}, nil
case telemetrytypes.FieldDataTypeInt64, case telemetrytypes.FieldDataTypeInt64,
telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeFloat64,
telemetrytypes.FieldDataTypeNumber: telemetrytypes.FieldDataTypeNumber:
return indexV3Columns["attributes_number"], nil return []*schema.Column{indexV3Columns["attributes_number"]}, nil
case telemetrytypes.FieldDataTypeBool: case telemetrytypes.FieldDataTypeBool:
return indexV3Columns["attributes_bool"], nil return []*schema.Column{indexV3Columns["attributes_bool"]}, nil
} }
case telemetrytypes.FieldContextSpan, telemetrytypes.FieldContextUnspecified: case telemetrytypes.FieldContextSpan, telemetrytypes.FieldContextUnspecified:
/* /*
@@ -196,7 +197,7 @@ func (m *defaultFieldMapper) getColumn(
// Check if this is a span scope field // Check if this is a span scope field
if strings.ToLower(key.Name) == SpanSearchScopeRoot || strings.ToLower(key.Name) == SpanSearchScopeEntryPoint { if strings.ToLower(key.Name) == SpanSearchScopeRoot || strings.ToLower(key.Name) == SpanSearchScopeEntryPoint {
// The actual SQL will be generated in the condition builder // The actual SQL will be generated in the condition builder
return &schema.Column{Name: key.Name, Type: schema.ColumnTypeBool}, nil return []*schema.Column{{Name: key.Name, Type: schema.ColumnTypeBool}}, nil
} }
// TODO(srikanthccv): remove this when it's safe to remove // TODO(srikanthccv): remove this when it's safe to remove
@@ -210,18 +211,18 @@ func (m *defaultFieldMapper) getColumn(
if _, ok := CalculatedFieldsDeprecated[key.Name]; ok { if _, ok := CalculatedFieldsDeprecated[key.Name]; ok {
// Check if we have a mapping for the deprecated calculated field // Check if we have a mapping for the deprecated calculated field
if col, ok := indexV3Columns[oldToNew[key.Name]]; ok { if col, ok := indexV3Columns[oldToNew[key.Name]]; ok {
return col, nil return []*schema.Column{col}, nil
} }
} }
if _, ok := IntrinsicFieldsDeprecated[key.Name]; ok { if _, ok := IntrinsicFieldsDeprecated[key.Name]; ok {
// Check if we have a mapping for the deprecated intrinsic field // Check if we have a mapping for the deprecated intrinsic field
if col, ok := indexV3Columns[oldToNew[key.Name]]; ok { if col, ok := indexV3Columns[oldToNew[key.Name]]; ok {
return col, nil return []*schema.Column{col}, nil
} }
} }
if col, ok := indexV3Columns[key.Name]; ok { if col, ok := indexV3Columns[key.Name]; ok {
return col, nil return []*schema.Column{col}, nil
} }
} }
return nil, qbtypes.ErrColumnNotFound return nil, qbtypes.ErrColumnNotFound
@@ -229,15 +230,17 @@ func (m *defaultFieldMapper) getColumn(
func (m *defaultFieldMapper) ColumnFor( func (m *defaultFieldMapper) ColumnFor(
ctx context.Context, ctx context.Context,
startNs, endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
) (*schema.Column, error) { ) ([]*schema.Column, error) {
return m.getColumn(ctx, key) return m.getColumn(ctx, startNs, endNs, key)
} }
// FieldFor returns the table field name for the given key if it exists // FieldFor returns the table field name for the given key if it exists
// otherwise it returns qbtypes.ErrColumnNotFound // otherwise it returns qbtypes.ErrColumnNotFound
func (m *defaultFieldMapper) FieldFor( func (m *defaultFieldMapper) FieldFor(
ctx context.Context, ctx context.Context,
startNs, endNs uint64,
key *telemetrytypes.TelemetryFieldKey, key *telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
// Special handling for span scope fields // Special handling for span scope fields
@@ -247,10 +250,11 @@ func (m *defaultFieldMapper) FieldFor(
return key.Name, nil return key.Name, nil
} }
column, err := m.getColumn(ctx, key) columns, err := m.getColumn(ctx, startNs, endNs, key)
if err != nil { if err != nil {
return "", err return "", err
} }
column := columns[0]
switch column.Type.GetType() { switch column.Type.GetType() {
case schema.ColumnTypeEnumJSON: case schema.ColumnTypeEnumJSON:
@@ -310,11 +314,12 @@ func (m *defaultFieldMapper) FieldFor(
// if it exists otherwise it returns qbtypes.ErrColumnNotFound // if it exists otherwise it returns qbtypes.ErrColumnNotFound
func (m *defaultFieldMapper) ColumnExpressionFor( func (m *defaultFieldMapper) ColumnExpressionFor(
ctx context.Context, ctx context.Context,
startNs, endNs uint64,
field *telemetrytypes.TelemetryFieldKey, field *telemetrytypes.TelemetryFieldKey,
keys map[string][]*telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, error) { ) (string, error) {
colName, err := m.FieldFor(ctx, field) colName, err := m.FieldFor(ctx, startNs, endNs, field)
if errors.Is(err, qbtypes.ErrColumnNotFound) { if errors.Is(err, qbtypes.ErrColumnNotFound) {
// the key didn't have the right context to be added to the query // the key didn't have the right context to be added to the query
// we try to use the context we know of // we try to use the context we know of
@@ -324,7 +329,7 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
if _, ok := indexV3Columns[field.Name]; ok { if _, ok := indexV3Columns[field.Name]; ok {
// if it is, attach the column name directly // if it is, attach the column name directly
field.FieldContext = telemetrytypes.FieldContextSpan field.FieldContext = telemetrytypes.FieldContextSpan
colName, _ = m.FieldFor(ctx, field) colName, _ = m.FieldFor(ctx, startNs, endNs, field)
} else { } else {
// - the context is not provided // - the context is not provided
// - there are not keys for the field // - there are not keys for the field
@@ -342,12 +347,12 @@ func (m *defaultFieldMapper) ColumnExpressionFor(
} }
} else if len(keysForField) == 1 { } else if len(keysForField) == 1 {
// we have a single key for the field, use it // we have a single key for the field, use it
colName, _ = m.FieldFor(ctx, keysForField[0]) colName, _ = m.FieldFor(ctx, startNs, endNs, keysForField[0])
} else { } else {
// select any non-empty value from the keys // select any non-empty value from the keys
args := []string{} args := []string{}
for _, key := range keysForField { for _, key := range keysForField {
colName, _ = m.FieldFor(ctx, key) colName, _ = m.FieldFor(ctx, startNs, endNs, key)
args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName)) args = append(args, fmt.Sprintf("toString(%s) != '', toString(%s)", colName, colName))
} }
colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", ")) colName = fmt.Sprintf("multiIf(%s, NULL)", strings.Join(args, ", "))

View File

@@ -92,7 +92,7 @@ func TestGetFieldKeyName(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
fm := NewFieldMapper() fm := NewFieldMapper()
result, err := fm.FieldFor(ctx, &tc.key) result, err := fm.FieldFor(ctx, 0, 0, &tc.key)
if tc.expectedError != nil { if tc.expectedError != nil {
assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedError, err)

View File

@@ -1,6 +1,7 @@
package telemetrytraces package telemetrytraces
import ( import (
"context"
"testing" "testing"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest" "github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
@@ -75,13 +76,16 @@ func TestSpanScopeFilterExpression(t *testing.T) {
FieldContext: telemetrytypes.FieldContextSpan, FieldContext: telemetrytypes.FieldContextSpan,
}} }}
whereClause, err := querybuilder.PrepareWhereClause(tt.expression, querybuilder.FilterExprVisitorOpts{ whereClause, err := querybuilder.PrepareWhereClause(tt.expression, querybuilder.FilterExprVisitorOpts{
Context: context.Background(),
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
FieldKeys: fieldKeys, FieldKeys: fieldKeys,
Builder: sb, Builder: sb,
}, tt.startNs, 1761458708000000000) StartNs: tt.startNs,
EndNs: 1761458708000000000,
})
if tt.expectError { if tt.expectError {
assert.Error(t, err) assert.Error(t, err)
@@ -142,13 +146,16 @@ func TestSpanScopeWithResourceFilter(t *testing.T) {
FieldContext: telemetrytypes.FieldContextResource, FieldContext: telemetrytypes.FieldContextResource,
}} }}
_, err := querybuilder.PrepareWhereClause(tt.expression, querybuilder.FilterExprVisitorOpts{ _, err := querybuilder.PrepareWhereClause(tt.expression, querybuilder.FilterExprVisitorOpts{
Context: context.Background(),
Logger: instrumentationtest.New().Logger(), Logger: instrumentationtest.New().Logger(),
FieldMapper: fm, FieldMapper: fm,
ConditionBuilder: cb, ConditionBuilder: cb,
FieldKeys: fieldKeys, FieldKeys: fieldKeys,
SkipResourceFilter: false, // This would be set by the statement builder SkipResourceFilter: false, // This would be set by the statement builder
}, 1761437108000000000, 1761458708000000000) StartNs: 1761437108000000000,
EndNs: 1761458708000000000,
})
assert.NoError(t, err) assert.NoError(t, err)
}) })

View File

@@ -313,7 +313,7 @@ func (b *traceQueryStatementBuilder) buildListQuery(
// TODO: should we deprecate `SelectFields` and return everything from a span like we do for logs? // TODO: should we deprecate `SelectFields` and return everything from a span like we do for logs?
for _, field := range query.SelectFields { for _, field := range query.SelectFields {
colExpr, err := b.fm.ColumnExpressionFor(ctx, &field, keys) colExpr, err := b.fm.ColumnExpressionFor(ctx, start, end, &field, keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -331,7 +331,7 @@ func (b *traceQueryStatementBuilder) buildListQuery(
// Add order by // Add order by
for _, orderBy := range query.Order { for _, orderBy := range query.Order {
colExpr, err := b.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys) colExpr, err := b.fm.ColumnExpressionFor(ctx, start, end, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -515,7 +515,7 @@ func (b *traceQueryStatementBuilder) buildTimeSeriesQuery(
// Keep original column expressions so we can build the tuple // Keep original column expressions so we can build the tuple
fieldNames := make([]string, 0, len(query.GroupBy)) fieldNames := make([]string, 0, len(query.GroupBy))
for _, gb := range query.GroupBy { for _, gb := range query.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, nil) expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, start, end, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -529,7 +529,7 @@ func (b *traceQueryStatementBuilder) buildTimeSeriesQuery(
allAggChArgs := make([]any, 0) allAggChArgs := make([]any, 0)
for i, agg := range query.Aggregations { for i, agg := range query.Aggregations {
rewritten, chArgs, err := b.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.aggExprRewriter.Rewrite(
ctx, agg.Expression, ctx, start, end, agg.Expression,
uint64(query.StepInterval.Seconds()), uint64(query.StepInterval.Seconds()),
keys, keys,
) )
@@ -657,7 +657,7 @@ func (b *traceQueryStatementBuilder) buildScalarQuery(
var allGroupByArgs []any var allGroupByArgs []any
for _, gb := range query.GroupBy { for _, gb := range query.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, nil) expr, args, err := querybuilder.CollisionHandledFinalExpr(ctx, start, end, &gb.TelemetryFieldKey, b.fm, b.cb, keys, telemetrytypes.FieldDataTypeString, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -674,7 +674,7 @@ func (b *traceQueryStatementBuilder) buildScalarQuery(
for idx := range query.Aggregations { for idx := range query.Aggregations {
aggExpr := query.Aggregations[idx] aggExpr := query.Aggregations[idx]
rewritten, chArgs, err := b.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.aggExprRewriter.Rewrite(
ctx, aggExpr.Expression, ctx, start, end, aggExpr.Expression,
rateInterval, rateInterval,
keys, keys,
) )
@@ -746,7 +746,7 @@ func (b *traceQueryStatementBuilder) buildScalarQuery(
// buildFilterCondition builds SQL condition from filter expression // buildFilterCondition builds SQL condition from filter expression
func (b *traceQueryStatementBuilder) addFilterCondition( func (b *traceQueryStatementBuilder) addFilterCondition(
_ context.Context, ctx context.Context,
sb *sqlbuilder.SelectBuilder, sb *sqlbuilder.SelectBuilder,
start, end uint64, start, end uint64,
query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation], query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation],
@@ -760,13 +760,16 @@ func (b *traceQueryStatementBuilder) addFilterCondition(
if query.Filter != nil && query.Filter.Expression != "" { if query.Filter != nil && query.Filter.Expression != "" {
// add filter expression // add filter expression
preparedWhereClause, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{ preparedWhereClause, err = querybuilder.PrepareWhereClause(query.Filter.Expression, querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.logger, Logger: b.logger,
FieldMapper: b.fm, FieldMapper: b.fm,
ConditionBuilder: b.cb, ConditionBuilder: b.cb,
FieldKeys: keys, FieldKeys: keys,
SkipResourceFilter: true, SkipResourceFilter: true,
Variables: variables, Variables: variables,
}, start, end) StartNs: start,
EndNs: end,
})
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -232,12 +232,15 @@ func (b *traceOperatorCTEBuilder) buildQueryCTE(ctx context.Context, queryName s
filterWhereClause, err := querybuilder.PrepareWhereClause( filterWhereClause, err := querybuilder.PrepareWhereClause(
query.Filter.Expression, query.Filter.Expression,
querybuilder.FilterExprVisitorOpts{ querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: b.stmtBuilder.logger, Logger: b.stmtBuilder.logger,
FieldMapper: b.stmtBuilder.fm, FieldMapper: b.stmtBuilder.fm,
ConditionBuilder: b.stmtBuilder.cb, ConditionBuilder: b.stmtBuilder.cb,
FieldKeys: keys, FieldKeys: keys,
SkipResourceFilter: true, SkipResourceFilter: true,
}, b.start, b.end, StartNs: b.start,
EndNs: b.end,
},
) )
if err != nil { if err != nil {
b.stmtBuilder.logger.ErrorContext(ctx, "Failed to prepare where clause", "error", err, "filter", query.Filter.Expression) b.stmtBuilder.logger.ErrorContext(ctx, "Failed to prepare where clause", "error", err, "filter", query.Filter.Expression)
@@ -450,7 +453,7 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
if selectedFields[field.Name] { if selectedFields[field.Name] {
continue continue
} }
colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, &field, keys) colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, b.start, b.end, &field, keys)
if err != nil { if err != nil {
b.stmtBuilder.logger.WarnContext(ctx, "failed to map select field", b.stmtBuilder.logger.WarnContext(ctx, "failed to map select field",
"field", field.Name, "error", err) "field", field.Name, "error", err)
@@ -465,7 +468,7 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
// Add order by support using ColumnExpressionFor // Add order by support using ColumnExpressionFor
orderApplied := false orderApplied := false
for _, orderBy := range b.operator.Order { for _, orderBy := range b.operator.Order {
colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, &orderBy.Key.TelemetryFieldKey, keys) colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, b.start, b.end, &orderBy.Key.TelemetryFieldKey, keys)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -547,6 +550,8 @@ func (b *traceOperatorCTEBuilder) buildTimeSeriesQuery(ctx context.Context, sele
for _, gb := range b.operator.GroupBy { for _, gb := range b.operator.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr( expr, args, err := querybuilder.CollisionHandledFinalExpr(
ctx, ctx,
b.start,
b.end,
&gb.TelemetryFieldKey, &gb.TelemetryFieldKey,
b.stmtBuilder.fm, b.stmtBuilder.fm,
b.stmtBuilder.cb, b.stmtBuilder.cb,
@@ -571,6 +576,8 @@ func (b *traceOperatorCTEBuilder) buildTimeSeriesQuery(ctx context.Context, sele
for i, agg := range b.operator.Aggregations { for i, agg := range b.operator.Aggregations {
rewritten, chArgs, err := b.stmtBuilder.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.stmtBuilder.aggExprRewriter.Rewrite(
ctx, ctx,
b.start,
b.end,
agg.Expression, agg.Expression,
uint64(b.operator.StepInterval.Seconds()), uint64(b.operator.StepInterval.Seconds()),
keys, keys,
@@ -656,6 +663,8 @@ func (b *traceOperatorCTEBuilder) buildTraceQuery(ctx context.Context, selectFro
for _, gb := range b.operator.GroupBy { for _, gb := range b.operator.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr( expr, args, err := querybuilder.CollisionHandledFinalExpr(
ctx, ctx,
b.start,
b.end,
&gb.TelemetryFieldKey, &gb.TelemetryFieldKey,
b.stmtBuilder.fm, b.stmtBuilder.fm,
b.stmtBuilder.cb, b.stmtBuilder.cb,
@@ -682,6 +691,8 @@ func (b *traceOperatorCTEBuilder) buildTraceQuery(ctx context.Context, selectFro
for i, agg := range b.operator.Aggregations { for i, agg := range b.operator.Aggregations {
rewritten, chArgs, err := b.stmtBuilder.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.stmtBuilder.aggExprRewriter.Rewrite(
ctx, ctx,
b.start,
b.end,
agg.Expression, agg.Expression,
rateInterval, rateInterval,
keys, keys,
@@ -795,6 +806,8 @@ func (b *traceOperatorCTEBuilder) buildScalarQuery(ctx context.Context, selectFr
for _, gb := range b.operator.GroupBy { for _, gb := range b.operator.GroupBy {
expr, args, err := querybuilder.CollisionHandledFinalExpr( expr, args, err := querybuilder.CollisionHandledFinalExpr(
ctx, ctx,
b.start,
b.end,
&gb.TelemetryFieldKey, &gb.TelemetryFieldKey,
b.stmtBuilder.fm, b.stmtBuilder.fm,
b.stmtBuilder.cb, b.stmtBuilder.cb,
@@ -819,6 +832,8 @@ func (b *traceOperatorCTEBuilder) buildScalarQuery(ctx context.Context, selectFr
for i, agg := range b.operator.Aggregations { for i, agg := range b.operator.Aggregations {
rewritten, chArgs, err := b.stmtBuilder.aggExprRewriter.Rewrite( rewritten, chArgs, err := b.stmtBuilder.aggExprRewriter.Rewrite(
ctx, ctx,
b.start,
b.end,
agg.Expression, agg.Expression,
uint64((b.end-b.start)/querybuilder.NsToSeconds), uint64((b.end-b.start)/querybuilder.NsToSeconds),
keys, keys,

View File

@@ -2,12 +2,13 @@ package telemetrytraces
import ( import (
"context" "context"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors" "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory" "github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/querybuilder" "github.com/SigNoz/signoz/pkg/querybuilder"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5" qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes" "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"log/slog"
) )
type traceOperatorStatementBuilder struct { type traceOperatorStatementBuilder struct {

View File

@@ -22,24 +22,23 @@ type JsonKeyToFieldFunc func(context.Context, *telemetrytypes.TelemetryFieldKey,
// FieldMapper maps the telemetry field key to the table field name. // FieldMapper maps the telemetry field key to the table field name.
type FieldMapper interface { type FieldMapper interface {
// FieldFor returns the field name for the given key. // FieldFor returns the field name for the given key.
FieldFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) FieldFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey) (string, error)
// ColumnFor returns the column for the given key. // ColumnFor returns the column for the given key.
ColumnFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) ColumnFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey) ([]*schema.Column, error)
// ColumnExpressionFor returns the column expression for the given key. // ColumnExpressionFor returns the column expression for the given key.
ColumnExpressionFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey) (string, error) ColumnExpressionFor(ctx context.Context, tsStart, tsEnd uint64, key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemetrytypes.TelemetryFieldKey) (string, error)
} }
// ConditionBuilder builds the condition for the filter. // ConditionBuilder builds the condition for the filter.
type ConditionBuilder interface { type ConditionBuilder interface {
// ConditionFor returns the condition for the given key, operator and value. // ConditionFor returns the condition for the given key, operator and value.
// TODO(srikanthccv,nikhilmantri0902): remove startNs, endNs when top_level_operations can be replaced with `is_remote` ConditionFor(ctx context.Context, startNs uint64, endNs uint64, key *telemetrytypes.TelemetryFieldKey, operator FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error)
ConditionFor(ctx context.Context, key *telemetrytypes.TelemetryFieldKey, operator FilterOperator, value any, sb *sqlbuilder.SelectBuilder, startNs uint64, endNs uint64) (string, error)
} }
type AggExprRewriter interface { type AggExprRewriter interface {
// Rewrite rewrites the aggregation expression to be used in the query. // Rewrite rewrites the aggregation expression to be used in the query.
Rewrite(ctx context.Context, expr string, rateInterval uint64, keys map[string][]*telemetrytypes.TelemetryFieldKey) (string, []any, error) Rewrite(ctx context.Context, startNs, endNs uint64, expr string, rateInterval uint64, keys map[string][]*telemetrytypes.TelemetryFieldKey) (string, []any, error)
RewriteMulti(ctx context.Context, exprs []string, rateInterval uint64, keys map[string][]*telemetrytypes.TelemetryFieldKey) ([]string, [][]any, error) RewriteMulti(ctx context.Context, startNs, endNs uint64, exprs []string, rateInterval uint64, keys map[string][]*telemetrytypes.TelemetryFieldKey) ([]string, [][]any, error)
} }
type Statement struct { type Statement struct {

View File

@@ -355,6 +355,10 @@ func (r *PostableRule) validate() error {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required")) errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
} }
if r.Version != "v5" {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
}
if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) { if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition")) errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
} }

View File

@@ -108,6 +108,7 @@ func TestParseIntoRule(t *testing.T) {
"ruleType": "threshold_rule", "ruleType": "threshold_rule",
"evalWindow": "5m", "evalWindow": "5m",
"frequency": "1m", "frequency": "1m",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -150,6 +151,7 @@ func TestParseIntoRule(t *testing.T) {
content: []byte(`{ content: []byte(`{
"alert": "DefaultsRule", "alert": "DefaultsRule",
"ruleType": "threshold_rule", "ruleType": "threshold_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -187,6 +189,7 @@ func TestParseIntoRule(t *testing.T) {
initRule: PostableRule{}, initRule: PostableRule{},
content: []byte(`{ content: []byte(`{
"alert": "PromQLRule", "alert": "PromQLRule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "promql", "queryType": "promql",
@@ -256,6 +259,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{ content: []byte(`{
"alert": "SeverityLabelTest", "alert": "SeverityLabelTest",
"schemaVersion": "v1", "schemaVersion": "v1",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -344,6 +348,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{ content: []byte(`{
"alert": "NoLabelsTest", "alert": "NoLabelsTest",
"schemaVersion": "v1", "schemaVersion": "v1",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -384,6 +389,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{ content: []byte(`{
"alert": "OverwriteTest", "alert": "OverwriteTest",
"schemaVersion": "v1", "schemaVersion": "v1",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -474,6 +480,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
content: []byte(`{ content: []byte(`{
"alert": "V2Test", "alert": "V2Test",
"schemaVersion": "v2", "schemaVersion": "v2",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -517,6 +524,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
initRule: PostableRule{}, initRule: PostableRule{},
content: []byte(`{ content: []byte(`{
"alert": "DefaultSchemaTest", "alert": "DefaultSchemaTest",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -569,6 +577,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
func TestParseIntoRuleThresholdGeneration(t *testing.T) { func TestParseIntoRuleThresholdGeneration(t *testing.T) {
content := []byte(`{ content := []byte(`{
"alert": "TestThresholds", "alert": "TestThresholds",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -639,6 +648,7 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
"schemaVersion": "v2", "schemaVersion": "v2",
"alert": "MultiThresholdAlert", "alert": "MultiThresholdAlert",
"ruleType": "threshold_rule", "ruleType": "threshold_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -732,6 +742,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyBelowTest", "alert": "AnomalyBelowTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -766,6 +777,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyBelowTest", "alert": "AnomalyBelowTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -799,6 +811,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyAboveTest", "alert": "AnomalyAboveTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -833,6 +846,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyAboveTest", "alert": "AnomalyAboveTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -866,6 +880,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyBelowAllTest", "alert": "AnomalyBelowAllTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -901,6 +916,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyBelowAllTest", "alert": "AnomalyBelowAllTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -935,6 +951,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "AnomalyOutOfBoundsTest", "alert": "AnomalyOutOfBoundsTest",
"ruleType": "anomaly_rule", "ruleType": "anomaly_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -969,6 +986,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "ThresholdTest", "alert": "ThresholdTest",
"ruleType": "threshold_rule", "ruleType": "threshold_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",
@@ -1003,6 +1021,7 @@ func TestAnomalyNegationEval(t *testing.T) {
ruleJSON: []byte(`{ ruleJSON: []byte(`{
"alert": "ThresholdTest", "alert": "ThresholdTest",
"ruleType": "threshold_rule", "ruleType": "threshold_rule",
"version": "v5",
"condition": { "condition": {
"compositeQuery": { "compositeQuery": {
"queryType": "builder", "queryType": "builder",

View File

@@ -0,0 +1,25 @@
package telemetrytypes
import (
"time"
)
// EvolutionEntry records one step in the evolution of a telemetry column:
// the column name/type that backs a (signal, field context, field name)
// combination from a given release time and version onward. Entries are
// attached to TelemetryFieldKey.Evolutions by metadata lookups.
type EvolutionEntry struct {
	Signal       Signal       `json:"signal"`        // telemetry signal this entry applies to
	ColumnName   string       `json:"column_name"`   // physical column backing the field
	ColumnType   string       `json:"column_type"`   // data type of that column
	FieldContext FieldContext `json:"field_context"` // context of the field (resource/attribute/...)
	FieldName    string       `json:"field_name"`    // field name; "__all__" marks a context-wide entry
	ReleaseTime  time.Time    `json:"release_time"`  // when this column layout became active
	Version      uint32       `json:"version"`       // version of this evolution step
}
// EvolutionSelector identifies the (signal, field context, field name)
// combination whose evolution entries should be looked up. FieldName may be
// set to "__all__" to select entries that apply to every field in the context.
type EvolutionSelector struct {
	Signal       Signal
	FieldContext FieldContext
	FieldName    string
}
// GetEvolutionMetadataUniqueKey builds the map key used to index evolution
// metadata for a selector, in the form "<signal>:<field context>:<field name>".
func GetEvolutionMetadataUniqueKey(selector *EvolutionSelector) string {
	signal := selector.Signal.StringValue()
	fieldContext := selector.FieldContext.StringValue()
	return signal + ":" + fieldContext + ":" + selector.FieldName
}

View File

@@ -41,6 +41,8 @@ type TelemetryFieldKey struct {
JSONPlan JSONAccessPlan `json:"-"` JSONPlan JSONAccessPlan `json:"-"`
Indexes []JSONDataTypeIndex `json:"-"` Indexes []JSONDataTypeIndex `json:"-"`
Materialized bool `json:"-"` // refers to promoted in case of body.... fields Materialized bool `json:"-"` // refers to promoted in case of body.... fields
Evolutions []*EvolutionEntry `json:"-"`
} }
func (f *TelemetryFieldKey) KeyNameContainsArray() bool { func (f *TelemetryFieldKey) KeyNameContainsArray() bool {

View File

@@ -41,6 +41,9 @@ type MetadataStore interface {
// PromotePaths promotes the paths. // PromotePaths promotes the paths.
PromotePaths(ctx context.Context, paths ...string) error PromotePaths(ctx context.Context, paths ...string) error
// GetColumnEvolutionMetadataMulti returns a list of evolution entries for the given selectors.
GetColumnEvolutionMetadataMulti(ctx context.Context, selectors []*EvolutionSelector) (map[string][]*EvolutionEntry, error)
// GetFirstSeenFromMetricMetadata gets the first seen timestamp for a metric metadata lookup key. // GetFirstSeenFromMetricMetadata gets the first seen timestamp for a metric metadata lookup key.
GetFirstSeenFromMetricMetadata(ctx context.Context, lookupKeys []MetricMetadataLookupKey) (map[MetricMetadataLookupKey]int64, error) GetFirstSeenFromMetricMetadata(ctx context.Context, lookupKeys []MetricMetadataLookupKey) (map[MetricMetadataLookupKey]int64, error)
} }

View File

@@ -12,25 +12,27 @@ import (
// MockMetadataStore implements the MetadataStore interface for testing purposes // MockMetadataStore implements the MetadataStore interface for testing purposes
type MockMetadataStore struct { type MockMetadataStore struct {
// Maps to store test data // Maps to store test data
KeysMap map[string][]*telemetrytypes.TelemetryFieldKey KeysMap map[string][]*telemetrytypes.TelemetryFieldKey
RelatedValuesMap map[string][]string RelatedValuesMap map[string][]string
AllValuesMap map[string]*telemetrytypes.TelemetryFieldValues AllValuesMap map[string]*telemetrytypes.TelemetryFieldValues
TemporalityMap map[string]metrictypes.Temporality TemporalityMap map[string]metrictypes.Temporality
PromotedPathsMap map[string]struct{} PromotedPathsMap map[string]struct{}
LogsJSONIndexesMap map[string][]schemamigrator.Index LogsJSONIndexesMap map[string][]schemamigrator.Index
LookupKeysMap map[telemetrytypes.MetricMetadataLookupKey]int64 ColumnEvolutionMetadataMap map[string][]*telemetrytypes.EvolutionEntry
LookupKeysMap map[telemetrytypes.MetricMetadataLookupKey]int64
} }
// NewMockMetadataStore creates a new instance of MockMetadataStore with initialized maps // NewMockMetadataStore creates a new instance of MockMetadataStore with initialized maps
func NewMockMetadataStore() *MockMetadataStore { func NewMockMetadataStore() *MockMetadataStore {
return &MockMetadataStore{ return &MockMetadataStore{
KeysMap: make(map[string][]*telemetrytypes.TelemetryFieldKey), KeysMap: make(map[string][]*telemetrytypes.TelemetryFieldKey),
RelatedValuesMap: make(map[string][]string), RelatedValuesMap: make(map[string][]string),
AllValuesMap: make(map[string]*telemetrytypes.TelemetryFieldValues), AllValuesMap: make(map[string]*telemetrytypes.TelemetryFieldValues),
TemporalityMap: make(map[string]metrictypes.Temporality), TemporalityMap: make(map[string]metrictypes.Temporality),
PromotedPathsMap: make(map[string]struct{}), PromotedPathsMap: make(map[string]struct{}),
LogsJSONIndexesMap: make(map[string][]schemamigrator.Index), LogsJSONIndexesMap: make(map[string][]schemamigrator.Index),
LookupKeysMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64), ColumnEvolutionMetadataMap: make(map[string][]*telemetrytypes.EvolutionEntry),
LookupKeysMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
} }
} }
@@ -95,9 +97,51 @@ func (m *MockMetadataStore) GetKeysMulti(ctx context.Context, fieldKeySelectors
} }
} }
// fetch and add evolutions
for k, v := range result {
keys := v
evolutionMetadataKeySelectors := getEvolutionMetadataKeySelectors(keys)
evolutions, err := m.GetColumnEvolutionMetadataMulti(ctx, evolutionMetadataKeySelectors)
if err != nil {
return nil, false, err
}
for i, key := range keys {
// first check if there is evolutions that with field name as __all__
// then check for specific field name
selector := &telemetrytypes.EvolutionSelector{
Signal: key.Signal,
FieldContext: key.FieldContext,
FieldName: "__all__",
}
if keyEvolutions, ok := evolutions[telemetrytypes.GetEvolutionMetadataUniqueKey(selector)]; ok {
keys[i].Evolutions = keyEvolutions
}
selector.FieldName = key.Name
if keyEvolutions, ok := evolutions[telemetrytypes.GetEvolutionMetadataUniqueKey(selector)]; ok {
keys[i].Evolutions = keyEvolutions
}
}
result[k] = keys
}
return result, true, nil return result, true, nil
} }
// getEvolutionMetadataKeySelectors converts telemetry field keys into the
// evolution selectors (signal + field context + field name) used to look up
// column evolution metadata for those keys.
func getEvolutionMetadataKeySelectors(keySelectors []*telemetrytypes.TelemetryFieldKey) []*telemetrytypes.EvolutionSelector {
	// Pre-size: exactly one selector is produced per input key.
	metadataKeySelectors := make([]*telemetrytypes.EvolutionSelector, 0, len(keySelectors))
	for _, keySelector := range keySelectors {
		metadataKeySelectors = append(metadataKeySelectors, &telemetrytypes.EvolutionSelector{
			Signal:       keySelector.Signal,
			FieldContext: keySelector.FieldContext,
			FieldName:    keySelector.Name,
		})
	}
	return metadataKeySelectors
}
// GetKey returns a list of keys with the given name // GetKey returns a list of keys with the given name
func (m *MockMetadataStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) { func (m *MockMetadataStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
if fieldKeySelector == nil { if fieldKeySelector == nil {
@@ -310,6 +354,27 @@ func (m *MockMetadataStore) ListLogsJSONIndexes(ctx context.Context, filters ...
return m.LogsJSONIndexesMap, nil return m.LogsJSONIndexesMap, nil
} }
// GetColumnEvolutionMetadataMulti returns the evolution entries stored in the
// mock for each selector, keyed by the selector's unique key. For every
// selector it checks both the context-wide "__all__" entry and the entry for
// the selector's specific field name.
//
// Bug fix: the previous implementation set selector.FieldName = "__all__" on
// the caller's selector (selector is a pointer, so selectors[i] was mutated)
// and then "restored" it from selectors[i].FieldName — which had already been
// overwritten, so the specific-field lookup ran with "__all__" again and the
// caller's selectors were permanently clobbered. We now build a separate
// wildcard selector and never mutate the input.
func (m *MockMetadataStore) GetColumnEvolutionMetadataMulti(ctx context.Context, selectors []*telemetrytypes.EvolutionSelector) (map[string][]*telemetrytypes.EvolutionEntry, error) {
	result := make(map[string][]*telemetrytypes.EvolutionEntry)

	for _, selector := range selectors {
		// Context-wide entries first: Signal:FieldContext:__all__.
		wildcard := &telemetrytypes.EvolutionSelector{
			Signal:       selector.Signal,
			FieldContext: selector.FieldContext,
			FieldName:    "__all__",
		}
		if entries, exists := m.ColumnEvolutionMetadataMap[telemetrytypes.GetEvolutionMetadataUniqueKey(wildcard)]; exists {
			result[telemetrytypes.GetEvolutionMetadataUniqueKey(wildcard)] = entries
		}

		// Then the entry for the specific field name, using the caller's
		// selector untouched.
		key := telemetrytypes.GetEvolutionMetadataUniqueKey(selector)
		if entries, exists := m.ColumnEvolutionMetadataMap[key]; exists {
			result[key] = entries
		}
	}

	return result, nil
}
func (m *MockMetadataStore) GetFirstSeenFromMetricMetadata(ctx context.Context, lookupKeys []telemetrytypes.MetricMetadataLookupKey) (map[telemetrytypes.MetricMetadataLookupKey]int64, error) { func (m *MockMetadataStore) GetFirstSeenFromMetricMetadata(ctx context.Context, lookupKeys []telemetrytypes.MetricMetadataLookupKey) (map[telemetrytypes.MetricMetadataLookupKey]int64, error) {
return m.LookupKeysMap, nil return m.LookupKeysMap, nil
} }