Compare commits


65 Commits

Author SHA1 Message Date
swapnil-signoz ee5d182539 Merge branch 'main' into refactor/cloud-integration-modules 2026-03-24 17:50:54 +05:30
swapnil-signoz 0bc12f02bc Merge branch 'main' into refactor/cloud-integration-handlers 2026-03-24 10:59:04 +05:30
swapnil-signoz e5f00421fe Merge branch 'main' into refactor/cloud-integration-handlers 2026-03-23 21:05:26 +05:30
swapnil-signoz 539252e10c feat: adding frontend openapi schema 2026-03-23 12:33:14 +05:30
swapnil-signoz d65f426254 chore: removing todo comment 2026-03-23 12:24:04 +05:30
swapnil-signoz 6e52f2c8f0 Merge branch 'refactor/cloud-integration-impl-store' into refactor/cloud-integration-handlers 2026-03-22 17:13:53 +05:30
swapnil-signoz d9f8a4ae5a Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-22 17:13:40 +05:30
swapnil-signoz eefe3edffd Merge branch 'main' into refactor/cloud-integration-handlers 2026-03-22 17:13:02 +05:30
swapnil-signoz 2051861a03 feat: adding handler skeleton 2026-03-22 17:12:35 +05:30
swapnil-signoz 4b01a40fb9 Merge branch 'refactor/cloud-integration-impl-store' into refactor/cloud-integration-handlers 2026-03-20 20:53:54 +05:30
swapnil-signoz 2d8a00bf18 fix: update error code for service not found 2026-03-20 20:53:33 +05:30
swapnil-signoz f1b26b310f Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-20 20:51:44 +05:30
swapnil-signoz 2c438b6c32 Merge branch 'refactor/cloud-integration-impl-store' into refactor/cloud-integration-handlers 2026-03-20 20:48:34 +05:30
swapnil-signoz 1814c2d13c Merge branch 'main' into refactor/cloud-integration-handlers 2026-03-20 17:52:31 +05:30
swapnil-signoz e6cd771f11 Merge origin/main into refactor/cloud-integration-handlers 2026-03-20 16:46:36 +05:30
swapnil-signoz 6b94f87ca0 Merge branch 'main' into refactor/cloud-integration-handlers 2026-03-19 11:43:21 +05:30
swapnil-signoz bf315253ae fix: lint issues 2026-03-19 11:43:09 +05:30
swapnil-signoz 668ff7bc39 fix: lint and ci issues 2026-03-19 11:34:27 +05:30
swapnil-signoz 07f2aa52fd feat: adding handlers 2026-03-19 01:35:01 +05:30
swapnil-signoz 3416b3ad55 Merge branch 'main' into refactor/cloud-integration-handlers 2026-03-18 21:50:40 +05:30
swapnil-signoz d6caa4f2c7 Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-18 14:08:14 +05:30
swapnil-signoz f86371566d refactor: clean up 2026-03-18 13:45:31 +05:30
swapnil-signoz 9115803084 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-18 13:42:43 +05:30
swapnil-signoz 0c14d8f966 refactor: review comments 2026-03-18 13:40:17 +05:30
swapnil-signoz 7afb461af8 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-18 11:14:33 +05:30
swapnil-signoz a21fbb4ee0 refactor: clean up 2026-03-18 11:14:05 +05:30
swapnil-signoz 0369842f3d refactor: clean up 2026-03-17 23:40:14 +05:30
swapnil-signoz 59cd96562a Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 23:10:54 +05:30
swapnil-signoz cc4475cab7 refactor: updating store methods 2026-03-17 23:10:15 +05:30
swapnil-signoz ac8c648420 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 21:09:47 +05:30
swapnil-signoz bede6be4b8 feat: adding method for service id creation 2026-03-17 21:09:26 +05:30
swapnil-signoz dd3d60e6df Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 20:49:31 +05:30
swapnil-signoz 538ab686d2 refactor: using serviceID type 2026-03-17 20:49:17 +05:30
swapnil-signoz 936a325cb9 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 17:25:58 +05:30
swapnil-signoz c6cdcd0143 refactor: renaming service type to service id 2026-03-17 17:25:29 +05:30
swapnil-signoz cd9211d718 refactor: clean up types 2026-03-17 17:04:27 +05:30
swapnil-signoz 0601c28782 feat: adding integration test 2026-03-17 11:02:46 +05:30
swapnil-signoz 580610dbfa Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-16 23:02:19 +05:30
swapnil-signoz 2d2aa02a81 refactor: split upsert store method 2026-03-16 18:27:42 +05:30
swapnil-signoz dd9723ad13 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-16 17:42:03 +05:30
swapnil-signoz 3651469416 Merge branch 'main' of https://github.com/SigNoz/signoz into refactor/cloud-integration-types 2026-03-16 17:41:52 +05:30
swapnil-signoz febce75734 refactor: update Dashboard struct comments and remove unused fields 2026-03-16 17:41:28 +05:30
swapnil-signoz e1616f3487 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-16 17:36:15 +05:30
swapnil-signoz 4b94287ac7 refactor: add comments for backward compatibility in PostableAgentCheckInRequest 2026-03-16 15:48:20 +05:30
swapnil-signoz 1575c7c54c refactor: streamlining types 2026-03-16 15:39:32 +05:30
swapnil-signoz 8def3f835b refactor: adding comments and removed wrong code 2026-03-16 11:10:53 +05:30
swapnil-signoz 11ed15f4c5 feat: implement cloud integration store 2026-03-14 17:05:02 +05:30
swapnil-signoz f47877cca9 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-14 17:01:51 +05:30
swapnil-signoz bb2b9215ba fix: correct GetService signature and remove shadowed Data field 2026-03-14 16:59:07 +05:30
swapnil-signoz 3111904223 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-14 16:36:35 +05:30
swapnil-signoz 003e2c30d8 Merge branch 'main' into refactor/cloud-integration-types 2026-03-14 16:25:35 +05:30
swapnil-signoz 00fe516d10 refactor: update cloud integration types and module interface 2026-03-14 16:25:16 +05:30
swapnil-signoz 0305f4f7db refactor: using struct for map 2026-03-13 16:09:26 +05:30
swapnil-signoz c60019a6dc Merge branch 'main' into refactor/cloud-integration-types 2026-03-12 23:41:22 +05:30
swapnil-signoz acde2a37fa feat: adding updated types for cloud integration 2026-03-12 23:40:44 +05:30
swapnil-signoz 945241a52a Merge branch 'main' into refactor/cloud-integration-types 2026-03-12 19:45:50 +05:30
swapnil-signoz e967f80c86 Merge branch 'main' into refactor/cloud-integration-types 2026-03-02 16:39:42 +05:30
swapnil-signoz a09dc325de Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-02 16:39:20 +05:30
swapnil-signoz 379b4f7fc4 refactor: removing interface check 2026-03-02 14:50:37 +05:30
swapnil-signoz 5e536ae077 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-02 14:49:35 +05:30
swapnil-signoz 234585e642 Merge branch 'main' into refactor/cloud-integration-types 2026-03-02 14:49:19 +05:30
swapnil-signoz 2cc14f1ad4 Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-02 14:49:00 +05:30
swapnil-signoz dc4ed4d239 feat: adding sql store implementation 2026-03-02 14:44:56 +05:30
swapnil-signoz 7281c36873 refactor: store interfaces to use local types and error 2026-03-02 13:27:46 +05:30
swapnil-signoz 40288776e8 feat: adding cloud integration type for refactor 2026-02-28 16:59:14 +05:30
23 changed files with 70 additions and 564 deletions

View File

@@ -144,8 +144,6 @@ telemetrystore:
##################### Prometheus #####################
prometheus:
# The maximum time a PromQL query is allowed to run before being aborted.
timeout: 2m
active_query_tracker:
# Whether to enable the active query tracker.
enabled: true

View File

@@ -190,7 +190,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.117.0
image: signoz/signoz:v0.116.1
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port

View File

@@ -117,7 +117,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.117.0
image: signoz/signoz:v0.116.1
ports:
- "8080:8080" # signoz port
volumes:

View File

@@ -181,7 +181,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.117.0}
image: signoz/signoz:${VERSION:-v0.116.1}
container_name: signoz
ports:
- "8080:8080" # signoz port

View File

@@ -109,7 +109,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.117.0}
image: signoz/signoz:${VERSION:-v0.116.1}
container_name: signoz
ports:
- "8080:8080" # signoz port

View File

@@ -136,7 +136,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
signoz.SQLStore,
integrationsController.GetPipelinesForInstalledIntegrations,
reader,
)
if err != nil {
return nil, err

View File

@@ -257,7 +257,7 @@ func TestManager_TestNotification_SendUnmatched_PromRule(t *testing.T) {
WillReturnRows(samplesRows)
// Create Prometheus provider for this test
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, store)
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, store)
},
ManagerOptionsHook: func(opts *rules.ManagerOptions) {
// Set Prometheus provider for PromQL queries

View File

@@ -3,7 +3,6 @@ package prometheus
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
@@ -21,9 +20,6 @@ type Config struct {
//
// If not set, the prometheus default is used (currently 5m).
LookbackDelta time.Duration `mapstructure:"lookback_delta"`
// Timeout is the maximum time a query is allowed to run before being aborted.
Timeout time.Duration `mapstructure:"timeout"`
}
func NewConfigFactory() factory.ConfigFactory {
@@ -37,14 +33,10 @@ func newConfig() factory.Config {
Path: "",
MaxConcurrent: 20,
},
Timeout: 2 * time.Minute,
}
}
func (c Config) Validate() error {
if c.Timeout <= 0 {
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "prometheus::timeout must be greater than 0")
}
return nil
}
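
The `Validate` hook removed in this hunk follows the usual decode-then-validate pattern for startup config. A minimal, self-contained sketch of that pattern (illustrative names only, not the SigNoz `factory` API):

```go
package main

import (
	"fmt"
	"time"
)

// Config mirrors the shape of the struct in the hunk above, reduced to the
// field being removed; names here are illustrative.
type Config struct {
	Timeout time.Duration
}

// Validate rejects a non-positive timeout, as the removed code did.
func (c Config) Validate() error {
	if c.Timeout <= 0 {
		return fmt.Errorf("prometheus::timeout must be greater than 0")
	}
	return nil
}

func main() {
	fmt.Println(Config{Timeout: 2 * time.Minute}.Validate()) // <nil>
	fmt.Println(Config{}.Validate())                         // timeout error
}
```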

View File

@@ -0,0 +1,3 @@
package prometheus
const FingerprintAsPromLabelName = "fingerprint"

View File

@@ -2,6 +2,7 @@ package prometheus
import (
"log/slog"
"time"
"github.com/prometheus/prometheus/promql"
)
@@ -20,7 +21,7 @@ func NewEngine(logger *slog.Logger, cfg Config) *Engine {
Logger: logger,
Reg: nil,
MaxSamples: 5_0000_000,
Timeout: cfg.Timeout,
Timeout: 2 * time.Minute,
ActiveQueryTracker: activeQueryTracker,
LookbackDelta: cfg.LookbackDelta,
})

View File

@@ -6,8 +6,6 @@ import (
"github.com/prometheus/prometheus/promql"
)
const FingerprintAsPromLabelName string = "fingerprint"
func RemoveExtraLabels(res *promql.Result, labelsToRemove ...string) error {
if len(labelsToRemove) == 0 || res == nil {
return nil

View File

@@ -36,28 +36,6 @@ var unquotedDottedNamePattern = regexp.MustCompile(`(?:^|[{,(\s])([a-zA-Z_][a-zA
// This is a common mistake when migrating to UTF-8 syntax.
var quotedMetricOutsideBracesPattern = regexp.MustCompile(`"([^"]+)"\s*\{`)
// tryEnhancePromQLExecError attempts to convert a PromQL execution error into
// a properly typed error. Returns nil if the error is not a recognized execution error.
func tryEnhancePromQLExecError(execErr error) error {
var eqc promql.ErrQueryCanceled
var eqt promql.ErrQueryTimeout
var es promql.ErrStorage
switch {
case errors.As(execErr, &eqc):
return errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled").WithAdditional(eqc.Error())
case errors.As(execErr, &eqt):
return errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "query timed out").WithAdditional(eqt.Error())
case errors.Is(execErr, context.DeadlineExceeded):
return errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "query timed out")
case errors.Is(execErr, context.Canceled):
return errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled")
case errors.As(execErr, &es):
return errors.Newf(errors.TypeInternal, errors.CodeInternal, "query execution error: %v", execErr)
default:
return nil
}
}
// enhancePromQLError adds helpful context to PromQL parse errors,
// particularly for UTF-8 syntax migration issues where metric and label
// names containing dots need to be quoted.
@@ -235,20 +213,27 @@ func (q *promqlQuery) Execute(ctx context.Context) (*qbv5.Result, error) {
time.Unix(0, end),
q.query.Step.Duration,
)
if err != nil {
// NewRangeQuery can fail with execution errors (e.g. context deadline exceeded)
// during the query queue/scheduling stage, not just parse errors.
if err := tryEnhancePromQLExecError(err); err != nil {
return nil, err
}
if err != nil {
return nil, enhancePromQLError(query, err)
}
res := qry.Exec(ctx)
if res.Err != nil {
if err := tryEnhancePromQLExecError(res.Err); err != nil {
return nil, err
var eqc promql.ErrQueryCanceled
var eqt promql.ErrQueryTimeout
var es promql.ErrStorage
switch {
case errors.As(res.Err, &eqc):
return nil, errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled")
case errors.As(res.Err, &eqt):
return nil, errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "query timeout")
case errors.As(res.Err, &es):
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "query execution error: %v", res.Err)
}
if errors.Is(res.Err, context.Canceled) {
return nil, errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled")
}
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "query execution error: %v", res.Err)
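
Both versions of `Execute` above classify PromQL failures with `errors.As` against the sentinel error types exported by the upstream `promql` package; the refactor only moves that switch into a helper. A minimal sketch of the classification pattern, standalone and using plain strings instead of the SigNoz error types:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/promql"
)

// classify mirrors the switch in the hunk above, reduced to a label.
func classify(err error) string {
	var eqc promql.ErrQueryCanceled
	var eqt promql.ErrQueryTimeout
	var es promql.ErrStorage
	switch {
	case errors.As(err, &eqc), errors.Is(err, context.Canceled):
		return "canceled"
	case errors.As(err, &eqt), errors.Is(err, context.DeadlineExceeded):
		return "timeout"
	case errors.As(err, &es):
		return "storage"
	default:
		return "internal"
	}
}

func main() {
	fmt.Println(classify(promql.ErrQueryTimeout("query timed out in expression evaluation"))) // timeout
	fmt.Println(classify(context.Canceled))                                                   // canceled
}
```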

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"slices"
"strings"
@@ -13,7 +12,6 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils"
@@ -36,101 +34,19 @@ type LogParsingPipelineController struct {
Repo
GetIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, error)
// TODO(Piyush): remove with qbv5 migration
reader interfaces.Reader
}
func NewLogParsingPipelinesController(
sqlStore sqlstore.SQLStore,
getIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, error),
reader interfaces.Reader,
) (*LogParsingPipelineController, error) {
repo := NewRepo(sqlStore)
return &LogParsingPipelineController{
Repo: repo,
GetIntegrationPipelines: getIntegrationPipelines,
reader: reader,
}, nil
}
// enrichPipelinesFilters resolves the type (tag vs resource) for filter keys that are
// missing type info, by looking them up in the store.
//
// TODO(Piyush): remove with qbv5 migration
func (pc *LogParsingPipelineController) enrichPipelinesFilters(
ctx context.Context, pipelines []pipelinetypes.GettablePipeline,
) ([]pipelinetypes.GettablePipeline, error) {
// Collect names of non-static keys that are missing type info.
// Static fields (body, trace_id, etc.) are intentionally Unspecified and map
// to top-level OTEL fields — they do not need enrichment.
unspecifiedNames := map[string]struct{}{}
for _, p := range pipelines {
if p.Filter != nil {
for _, item := range p.Filter.Items {
if item.Key.Type == v3.AttributeKeyTypeUnspecified {
// Skip static fields
if _, isStatic := constants.StaticFieldsLogsV3[item.Key.Key]; isStatic {
continue
}
// Skip enrich body.* fields
if strings.HasPrefix(item.Key.Key, "body.") {
continue
}
unspecifiedNames[item.Key.Key] = struct{}{}
}
}
}
}
if len(unspecifiedNames) == 0 {
return pipelines, nil
}
logFields, apiErr := pc.reader.GetLogFieldsFromNames(ctx, slices.Collect(maps.Keys(unspecifiedNames)))
if apiErr != nil {
slog.ErrorContext(ctx, "failed to fetch log fields for pipeline filter enrichment", "error", apiErr)
return pipelines, apiErr
}
// Build a simple name → AttributeKeyType map from the response.
fieldTypes := map[string]v3.AttributeKeyType{}
for _, f := range append(logFields.Selected, logFields.Interesting...) {
switch f.Type {
case constants.Resources:
fieldTypes[f.Name] = v3.AttributeKeyTypeResource
case constants.Attributes:
fieldTypes[f.Name] = v3.AttributeKeyTypeTag
}
}
// Set the resolved type on each untyped filter key in-place.
for i := range pipelines {
if pipelines[i].Filter != nil {
for j := range pipelines[i].Filter.Items {
key := &pipelines[i].Filter.Items[j].Key
if key.Type == v3.AttributeKeyTypeUnspecified {
// Skip static fields
if _, isStatic := constants.StaticFieldsLogsV3[key.Key]; isStatic {
continue
}
// Skip enrich body.* fields
if strings.HasPrefix(key.Key, "body.") {
continue
}
if t, ok := fieldTypes[key.Key]; ok {
key.Type = t
} else {
// default to attribute
key.Type = v3.AttributeKeyTypeTag
}
}
}
}
}
return pipelines, nil
}
// PipelinesResponse is used to prepare http response for pipelines config related requests
type PipelinesResponse struct {
*opamptypes.AgentConfigVersion
@@ -340,12 +256,7 @@ func (ic *LogParsingPipelineController) PreviewLogsPipelines(
ctx context.Context,
request *PipelinesPreviewRequest,
) (*PipelinesPreviewResponse, error) {
pipelines, err := ic.enrichPipelinesFilters(ctx, request.Pipelines)
if err != nil {
return nil, err
}
result, collectorLogs, err := SimulatePipelinesProcessing(ctx, pipelines, request.Logs)
result, collectorLogs, err := SimulatePipelinesProcessing(ctx, request.Pipelines, request.Logs)
if err != nil {
return nil, err
}
@@ -382,8 +293,10 @@ func (pc *LogParsingPipelineController) RecommendAgentConfig(
if configVersion != nil {
pipelinesVersion = configVersion.Version
}
ctx := context.Background()
pipelinesResp, err := pc.GetPipelinesByVersion(ctx, orgId, pipelinesVersion)
pipelinesResp, err := pc.GetPipelinesByVersion(
context.Background(), orgId, pipelinesVersion,
)
if err != nil {
return nil, "", err
}
@@ -393,17 +306,12 @@ func (pc *LogParsingPipelineController) RecommendAgentConfig(
return nil, "", errors.WrapInternalf(err, CodeRawPipelinesMarshalFailed, "could not serialize pipelines to JSON")
}
enrichedPipelines, err := pc.enrichPipelinesFilters(ctx, pipelinesResp.Pipelines)
if err != nil {
return nil, "", err
}
if querybuilder.BodyJSONQueryEnabled {
// add default normalize pipeline at the beginning, only for sending to collector
enrichedPipelines = append([]pipelinetypes.GettablePipeline{pc.getNormalizePipeline()}, enrichedPipelines...)
pipelinesResp.Pipelines = append([]pipelinetypes.GettablePipeline{pc.getNormalizePipeline()}, pipelinesResp.Pipelines...)
}
updatedConf, err := GenerateCollectorConfigWithPipelines(currentConfYaml, enrichedPipelines)
updatedConf, err := GenerateCollectorConfigWithPipelines(currentConfYaml, pipelinesResp.Pipelines)
if err != nil {
return nil, "", err
}
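
The `enrichPipelinesFilters` helper removed above resolves untyped filter keys by name and falls back to treating unknown keys as attributes. A compact sketch of that resolution step (hypothetical key/type names, independent of the v3 model):

```go
package main

import "fmt"

type keyType string

const (
	typeUnspecified keyType = ""
	typeResource    keyType = "resource"
	typeTag         keyType = "tag"
)

// resolve fills in missing key types from a lookup table, defaulting to tag,
// mirroring the enrichment loop in the hunk above.
func resolve(keys map[string]keyType, known map[string]keyType) {
	for name, t := range keys {
		if t != typeUnspecified {
			continue
		}
		if kt, ok := known[name]; ok {
			keys[name] = kt
		} else {
			keys[name] = typeTag // default to attribute
		}
	}
}

func main() {
	keys := map[string]keyType{"k8s.pod.name": typeUnspecified, "http.method": typeUnspecified}
	resolve(keys, map[string]keyType{"k8s.pod.name": typeResource})
	fmt.Println(keys) // map[http.method:tag k8s.pod.name:resource]
}
```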

View File

@@ -1407,7 +1407,7 @@ func Test_querier_Traces_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1633,7 +1633,7 @@ func Test_querier_Traces_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1934,7 +1934,7 @@ func Test_querier_Logs_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -2162,7 +2162,7 @@ func Test_querier_Logs_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,

View File

@@ -1459,7 +1459,7 @@ func Test_querier_Traces_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1685,7 +1685,7 @@ func Test_querier_Traces_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1985,7 +1985,7 @@ func Test_querier_Logs_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -2213,7 +2213,7 @@ func Test_querier_Logs_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,

View File

@@ -121,7 +121,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
signoz.SQLStore,
integrationsController.GetPipelinesForInstalledIntegrations,
reader,
)
if err != nil {
return nil, err

View File

@@ -1,7 +1,6 @@
package tracedetail
import (
"maps"
"slices"
"sort"
@@ -64,36 +63,32 @@ func findIndexForSelectedSpanFromPreOrder(spans []*model.Span, selectedSpanId st
return selectedSpanIndex
}
func getPathFromRootToSelectedSpanId(node *model.Span, selectedSpanId string) (bool, []string) {
func getPathFromRootToSelectedSpanId(node *model.Span, selectedSpanId string, uncollapsedSpans []string, isSelectedSpanIDUnCollapsed bool) (bool, []string) {
spansFromRootToNode := []string{}
spansFromRootToNode = append(spansFromRootToNode, node.SpanID)
if node.SpanID == selectedSpanId {
if isSelectedSpanIDUnCollapsed {
spansFromRootToNode = append(spansFromRootToNode, node.SpanID)
}
return true, spansFromRootToNode
}
isPresentInSubtreeForTheNode := false
for _, child := range node.Children {
isPresentInThisSubtree, _spansFromRootToNode := getPathFromRootToSelectedSpanId(child, selectedSpanId)
isPresentInThisSubtree, _spansFromRootToNode := getPathFromRootToSelectedSpanId(child, selectedSpanId, uncollapsedSpans, isSelectedSpanIDUnCollapsed)
// if the span of interest is present in the given subtree, add the current span to the uncollapsed list
if isPresentInThisSubtree {
if !slices.Contains(uncollapsedSpans, node.SpanID) {
spansFromRootToNode = append(spansFromRootToNode, node.SpanID)
}
isPresentInSubtreeForTheNode = true
spansFromRootToNode = append(spansFromRootToNode, _spansFromRootToNode...)
break
}
}
return isPresentInSubtreeForTheNode, spansFromRootToNode
}
// traverseOpts holds the traversal configuration that remains constant
// throughout the recursion. Per-call state (level, isPartOfPreOrder, etc.)
// is passed as direct arguments.
type traverseOpts struct {
uncollapsedSpans map[string]struct{}
selectedSpanID string
}
func traverseTrace(span *model.Span, opts traverseOpts, level uint64, isPartOfPreOrder bool, hasSibling bool) []*model.Span {
func traverseTrace(span *model.Span, uncollapsedSpans []string, level uint64, isPartOfPreOrder bool, hasSibling bool, selectedSpanId string) []*model.Span {
preOrderTraversal := []*model.Span{}
// sort the children to maintain the order across requests
@@ -131,9 +126,8 @@ func traverseTrace(span *model.Span, opts traverseOpts, level uint64, isPartOfPr
preOrderTraversal = append(preOrderTraversal, &nodeWithoutChildren)
}
_, isAlreadyUncollapsed := opts.uncollapsedSpans[span.SpanID]
for index, child := range span.Children {
_childTraversal := traverseTrace(child, opts, level+1, isPartOfPreOrder && isAlreadyUncollapsed, index != (len(span.Children)-1))
_childTraversal := traverseTrace(child, uncollapsedSpans, level+1, isPartOfPreOrder && slices.Contains(uncollapsedSpans, span.SpanID), index != (len(span.Children)-1), selectedSpanId)
preOrderTraversal = append(preOrderTraversal, _childTraversal...)
nodeWithoutChildren.SubTreeNodeCount += child.SubTreeNodeCount + 1
span.SubTreeNodeCount += child.SubTreeNodeCount + 1
@@ -166,31 +160,15 @@ func GetSelectedSpans(uncollapsedSpans []string, selectedSpanID string, traceRoo
var preOrderTraversal = make([]*model.Span, 0)
var rootServiceName, rootServiceEntryPoint string
// create a map of uncollapsed spans for quick lookup
uncollapsedSpanMap := make(map[string]struct{})
for _, spanID := range uncollapsedSpans {
uncollapsedSpanMap[spanID] = struct{}{}
}
updatedUncollapsedSpans := uncollapsedSpans
selectedSpanIndex := -1
for _, rootSpanID := range traceRoots {
if rootNode, exists := spanIdToSpanNodeMap[rootSpanID.SpanID]; exists {
present, spansFromRootToNode := getPathFromRootToSelectedSpanId(rootNode, selectedSpanID)
if present {
for _, spanID := range spansFromRootToNode {
if selectedSpanID == spanID && !isSelectedSpanIDUnCollapsed {
continue
}
uncollapsedSpanMap[spanID] = struct{}{}
}
}
_, spansFromRootToNode := getPathFromRootToSelectedSpanId(rootNode, selectedSpanID, updatedUncollapsedSpans, isSelectedSpanIDUnCollapsed)
updatedUncollapsedSpans = append(updatedUncollapsedSpans, spansFromRootToNode...)
opts := traverseOpts{
uncollapsedSpans: uncollapsedSpanMap,
selectedSpanID: selectedSpanID,
}
_preOrderTraversal := traverseTrace(rootNode, opts, 0, true, false)
_preOrderTraversal := traverseTrace(rootNode, updatedUncollapsedSpans, 0, true, false, selectedSpanID)
_selectedSpanIndex := findIndexForSelectedSpanFromPreOrder(_preOrderTraversal, selectedSpanID)
if _selectedSpanIndex != -1 {
@@ -232,5 +210,5 @@ func GetSelectedSpans(uncollapsedSpans []string, selectedSpanID string, traceRoo
startIndex = 0
}
return preOrderTraversal[startIndex:endIndex], slices.Collect(maps.Keys(uncollapsedSpanMap)), rootServiceName, rootServiceEntryPoint
return preOrderTraversal[startIndex:endIndex], updatedUncollapsedSpans, rootServiceName, rootServiceEntryPoint
}
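
Both versions of `getPathFromRootToSelectedSpanId` above are a depth-first search that returns the root-to-target path; the refactor only changes how the collected IDs feed the uncollapsed set. A minimal sketch of the underlying search over a toy tree (illustrative `node` type, not `model.Span`):

```go
package main

import "fmt"

type node struct {
	id       string
	children []*node
}

// pathTo reports whether target is in n's subtree and, if so, returns the
// IDs from n down to target, mirroring the recursion in the hunk above.
func pathTo(n *node, target string) (bool, []string) {
	if n.id == target {
		return true, []string{n.id}
	}
	for _, c := range n.children {
		if ok, p := pathTo(c, target); ok {
			return true, append([]string{n.id}, p...)
		}
	}
	return false, nil
}

func main() {
	root := &node{id: "root", children: []*node{
		{id: "parent", children: []*node{{id: "selected"}}},
		{id: "unrelated"},
	}}
	fmt.Println(pathTo(root, "selected")) // true [root parent selected]
}
```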

View File

@@ -1,355 +0,0 @@
// Package tracedetail tests — waterfall
//
// # Background
//
// The waterfall view renders a trace as a scrollable list of spans in
// pre-order (parent before children, siblings left-to-right). Because a trace
// can have thousands of spans, only a window of ~500 is returned per request.
// The window is centred on the selected span.
//
// # Key concepts
//
// uncollapsedSpans
//
// The set of span IDs the user has manually expanded in the UI.
// Only the direct children of an uncollapsed span are included in the
// output; grandchildren stay hidden until their parent is also uncollapsed.
// When multiple spans are uncollapsed their children are all visible at once.
//
// selectedSpanID
//
// The span currently focused — set when the user clicks a span in the
// waterfall or selects one from the flamegraph. The output window is always
// centred on this span. The path from the trace root down to the selected
// span is automatically uncollapsed so ancestors are visible even if they are
// not in uncollapsedSpans.
//
// isSelectedSpanIDUnCollapsed
//
// Controls whether the selected span's own children are shown:
// true — user expanded the span (click-to-open in waterfall or flamegraph);
// direct children of the selected span are included.
// false — user selected without expanding;
// the span is visible but its children remain hidden.
//
// traceRoots
//
// Root spans of the trace — spans with no parent in the current dataset.
// Normally one, but multiple roots are common when upstream services are
// not instrumented or their spans were not sampled/exported.
package tracedetail
import (
"fmt"
"testing"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/stretchr/testify/assert"
)
// Pre-order traversal is preserved: parent before children, siblings left-to-right.
func TestGetSelectedSpans_PreOrderTraversal(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("child1", "svc", mkSpan("grandchild", "svc")),
mkSpan("child2", "svc"),
)
spanMap := buildSpanMap(root)
spans, _, _, _ := GetSelectedSpans([]string{"root", "child1"}, "root", []*model.Span{root}, spanMap, false)
assert.Equal(t, []string{"root", "child1", "grandchild", "child2"}, spanIDs(spans))
}
// Multiple roots: both trees are flattened into a single pre-order list with
// root1's subtree before root2's. Service/entry-point come from the first root.
//
// root1 svc-a ← selected
// └─ child1
// root2 svc-b
// └─ child2
//
// Expected output order: root1 → child1 → root2 → child2
func TestGetSelectedSpans_MultipleRoots(t *testing.T) {
root1 := mkSpan("root1", "svc-a", mkSpan("child1", "svc-a"))
root2 := mkSpan("root2", "svc-b", mkSpan("child2", "svc-b"))
spanMap := buildSpanMap(root1, root2)
spans, _, svcName, entryPoint := GetSelectedSpans([]string{"root1", "root2"}, "root1", []*model.Span{root1, root2}, spanMap, false)
assert.Equal(t, []string{"root1", "child1", "root2", "child2"}, spanIDs(spans), "root1 subtree must precede root2 subtree")
assert.Equal(t, "svc-a", svcName, "metadata comes from first root")
assert.Equal(t, "root1-op", entryPoint, "metadata comes from first root")
}
// isSelectedSpanIDUnCollapsed=true opens only the selected span's direct children,
// not deeper descendants.
//
// root → selected (expanded)
// ├─ child1 ✓
// │ └─ grandchild ✗ (only one level opened)
// └─ child2 ✓
func TestGetSelectedSpans_ExpandedSelectedSpan(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("selected", "svc",
mkSpan("child1", "svc", mkSpan("grandchild", "svc")),
mkSpan("child2", "svc"),
),
)
spanMap := buildSpanMap(root)
spans, _, _, _ := GetSelectedSpans([]string{}, "selected", []*model.Span{root}, spanMap, true)
// root and selected are on the auto-uncollapsed path; child1/child2 are direct
// children of the expanded selected span; grandchild stays hidden.
assert.Equal(t, []string{"root", "selected", "child1", "child2"}, spanIDs(spans))
}
// Multiple spans uncollapsed simultaneously: children of all uncollapsed spans
// are visible at once.
//
// root
// ├─ childA (uncollapsed) → grandchildA ✓
// └─ childB (uncollapsed) → grandchildB ✓
func TestGetSelectedSpans_MultipleUncollapsed(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("childA", "svc", mkSpan("grandchildA", "svc")),
mkSpan("childB", "svc", mkSpan("grandchildB", "svc")),
)
spanMap := buildSpanMap(root)
spans, _, _, _ := GetSelectedSpans([]string{"root", "childA", "childB"}, "root", []*model.Span{root}, spanMap, false)
assert.Equal(t, []string{"root", "childA", "grandchildA", "childB", "grandchildB"}, spanIDs(spans))
}
// Collapsing a span with other uncollapsed spans
//
// root
// ├─ childA (previously expanded — in uncollapsedSpans)
// │ ├─ grandchild1 ✓
// │ │ └─ greatGrandchild ✗ (grandchild1 not in uncollapsedSpans)
// │ └─ grandchild2 ✓
// └─ childB ← selected (not expanded)
func TestGetSelectedSpans_ManualUncollapse(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("childA", "svc",
mkSpan("grandchild1", "svc", mkSpan("greatGrandchild", "svc")),
mkSpan("grandchild2", "svc"),
),
mkSpan("childB", "svc"),
)
spanMap := buildSpanMap(root)
// childA was expanded in a previous interaction; childB is now selected without expanding
spans, _, _, _ := GetSelectedSpans([]string{"childA"}, "childB", []*model.Span{root}, spanMap, false)
// path to childB auto-uncollpases root → childA and childB appear; childA is in
// uncollapsedSpans so its children appear; greatGrandchild stays hidden.
assert.Equal(t, []string{"root", "childA", "grandchild1", "grandchild2", "childB"}, spanIDs(spans))
}
// A collapsed span hides all children.
func TestGetSelectedSpans_CollapsedSpan(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("child1", "svc"),
mkSpan("child2", "svc"),
)
spanMap := buildSpanMap(root)
spans, _, _, _ := GetSelectedSpans([]string{}, "root", []*model.Span{root}, spanMap, false)
assert.Equal(t, []string{"root"}, spanIDs(spans))
}
// Selecting a span auto-uncollapses the path from root to that span so it is visible.
//
// root → parent → selected
func TestGetSelectedSpans_PathToSelectedIsUncollapsed(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("parent", "svc",
mkSpan("selected", "svc"),
),
)
spanMap := buildSpanMap(root)
// no manually uncollapsed spans — path should still be opened
spans, _, _, _ := GetSelectedSpans([]string{}, "selected", []*model.Span{root}, spanMap, false)
assert.Equal(t, []string{"root", "parent", "selected"}, spanIDs(spans))
}
// The path-to-selected spans are returned in updatedUncollapsedSpans.
func TestGetSelectedSpans_PathReturnedInUncollapsed(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("parent", "svc",
mkSpan("selected", "svc"),
),
)
spanMap := buildSpanMap(root)
spans, uncollapsed, _, _ := GetSelectedSpans([]string{}, "selected", []*model.Span{root}, spanMap, false)
assert.Equal(t, []string{"root", "parent"}, uncollapsed)
assert.Equal(t, []string{"root", "parent", "selected"}, spanIDs(spans))
}
// Siblings of ancestors are rendered as collapsed nodes but their subtrees
// must NOT be expanded.
//
// root
// ├─ unrelated → unrelated-child (✗)
// └─ parent → selected
func TestGetSelectedSpans_SiblingsNotExpanded(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("unrelated", "svc", mkSpan("unrelated-child", "svc")),
mkSpan("parent", "svc",
mkSpan("selected", "svc"),
),
)
spanMap := buildSpanMap(root)
spans, uncollapsed, _, _ := GetSelectedSpans([]string{}, "selected", []*model.Span{root}, spanMap, false)
// children of root sort alphabetically: parent < unrelated; unrelated-child stays hidden
assert.Equal(t, []string{"root", "parent", "selected", "unrelated"}, spanIDs(spans))
// only the path nodes are tracked as uncollapsed — unrelated is not
assert.Equal(t, []string{"root", "parent"}, uncollapsed)
}
// An unknown selectedSpanID must not panic; returns a window from index 0.
func TestGetSelectedSpans_UnknownSelectedSpan(t *testing.T) {
root := mkSpan("root", "svc", mkSpan("child", "svc"))
spanMap := buildSpanMap(root)
spans, _, _, _ := GetSelectedSpans([]string{}, "nonexistent", []*model.Span{root}, spanMap, false)
assert.Equal(t, []string{"root"}, spanIDs(spans))
}
// Test to check if Level, HasChildren, HasSiblings, and SubTreeNodeCount are populated correctly.
//
// root level=0, hasChildren=true, hasSiblings=false, subTree=4
// child1 level=1, hasChildren=true, hasSiblings=true, subTree=2
// grandchild level=2, hasChildren=false, hasSiblings=false, subTree=1
// child2 level=1, hasChildren=false, hasSiblings=false, subTree=1
func TestGetSelectedSpans_SpanMetadata(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("child1", "svc", mkSpan("grandchild", "svc")),
mkSpan("child2", "svc"),
)
spanMap := buildSpanMap(root)
spans, _, _, _ := GetSelectedSpans([]string{"root", "child1"}, "root", []*model.Span{root}, spanMap, false)
byID := map[string]*model.Span{}
for _, s := range spans {
byID[s.SpanID] = s
}
assert.Equal(t, uint64(0), byID["root"].Level)
assert.Equal(t, uint64(1), byID["child1"].Level)
assert.Equal(t, uint64(1), byID["child2"].Level)
assert.Equal(t, uint64(2), byID["grandchild"].Level)
assert.True(t, byID["root"].HasChildren)
assert.True(t, byID["child1"].HasChildren)
assert.False(t, byID["child2"].HasChildren)
assert.False(t, byID["grandchild"].HasChildren)
assert.False(t, byID["root"].HasSiblings, "root has no siblings")
assert.True(t, byID["child1"].HasSiblings, "child1 has sibling child2")
assert.False(t, byID["child2"].HasSiblings, "child2 is the last child")
assert.False(t, byID["grandchild"].HasSiblings, "grandchild has no siblings")
assert.Equal(t, uint64(4), byID["root"].SubTreeNodeCount)
assert.Equal(t, uint64(2), byID["child1"].SubTreeNodeCount)
assert.Equal(t, uint64(1), byID["grandchild"].SubTreeNodeCount)
assert.Equal(t, uint64(1), byID["child2"].SubTreeNodeCount)
}
// If the selected span is already in uncollapsedSpans AND isSelectedSpanIDUnCollapsed=true,
// its ID must still appear only once in the returned uncollapsed list.
func TestGetSelectedSpans_DuplicateInUncollapsed(t *testing.T) {
root := mkSpan("root", "svc",
mkSpan("selected", "svc", mkSpan("child", "svc")),
)
spanMap := buildSpanMap(root)
_, uncollapsed, _, _ := GetSelectedSpans(
[]string{"selected"}, // already present
"selected",
[]*model.Span{root}, spanMap,
true,
)
count := 0
for _, id := range uncollapsed {
if id == "selected" {
count++
}
}
assert.Equal(t, 1, count, "should appear once")
}
// makeChain builds a linear trace: span0 → span1 → … → span(n-1).
// All span IDs are "span0", "span1", … so the caller can reference them by index.
func makeChain(n int) (*model.Span, map[string]*model.Span, []string) {
spans := make([]*model.Span, n)
for i := n - 1; i >= 0; i-- {
if i == n-1 {
spans[i] = mkSpan(fmt.Sprintf("span%d", i), "svc")
} else {
spans[i] = mkSpan(fmt.Sprintf("span%d", i), "svc", spans[i+1])
}
}
uncollapsed := make([]string, n)
for i := range spans {
uncollapsed[i] = fmt.Sprintf("span%d", i)
}
return spans[0], buildSpanMap(spans[0]), uncollapsed
}
// The selected span is centred: 200 spans before it, 300 after (0.4 / 0.6 split).
func TestGetSelectedSpans_WindowCentredOnSelected(t *testing.T) {
root, spanMap, uncollapsed := makeChain(600)
spans, _, _, _ := GetSelectedSpans(uncollapsed, "span300", []*model.Span{root}, spanMap, false)
assert.Equal(t, 500, len(spans), "window should be 500 spans")
// window is [100, 600): span300 lands at position 200 (300 - 100)
assert.Equal(t, "span100", spans[0].SpanID, "window starts 200 before selected")
assert.Equal(t, "span300", spans[200].SpanID, "selected span at position 200 in window")
assert.Equal(t, "span599", spans[499].SpanID, "window ends 300 after selected")
}
// When the selected span is near the start, the window shifts right so no
// negative index is used — the result is still 500 spans.
func TestGetSelectedSpans_WindowShiftsAtStart(t *testing.T) {
root, spanMap, uncollapsed := makeChain(600)
spans, _, _, _ := GetSelectedSpans(uncollapsed, "span10", []*model.Span{root}, spanMap, false)
assert.Equal(t, 500, len(spans))
assert.Equal(t, "span0", spans[0].SpanID, "window clamped to start of trace")
assert.Equal(t, "span10", spans[10].SpanID, "selected span still in window")
}
func mkSpan(id, service string, children ...*model.Span) *model.Span {
return &model.Span{
SpanID: id,
ServiceName: service,
Name: id + "-op",
Children: children,
}
}
// spanIDs returns SpanIDs in order.
func spanIDs(spans []*model.Span) []string {
ids := make([]string, len(spans))
for i, s := range spans {
ids[i] = s.SpanID
}
return ids
}
// buildSpanMap indexes every span in a set of trees by SpanID.
func buildSpanMap(roots ...*model.Span) map[string]*model.Span {
m := map[string]*model.Span{}
var walk func(*model.Span)
walk = func(s *model.Span) {
m[s.SpanID] = s
for _, c := range s.Children {
walk(c)
}
}
for _, r := range roots {
walk(r)
}
return m
}
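
The two window tests above pin down the paging arithmetic: a 500-span window with roughly a 0.4/0.6 split around the selection, clamped at the trace boundaries. A hedged sketch consistent with both cases (one plausible reading of the tests, not the actual implementation):

```go
package main

import "fmt"

// windowBounds returns slice bounds for a 500-span window holding ~200 spans
// before the selection, clamped to the trace, matching the expectations in
// TestGetSelectedSpans_WindowCentredOnSelected and
// TestGetSelectedSpans_WindowShiftsAtStart.
func windowBounds(selectedIndex, total int) (start, end int) {
	const size, before = 500, 200
	start = selectedIndex - before
	if start < 0 {
		start = 0
	}
	end = start + size
	if end > total {
		end = total
	}
	return start, end
}

func main() {
	fmt.Println(windowBounds(300, 600)) // 100 600: selection lands at offset 200
	fmt.Println(windowBounds(10, 600))  // 0 500: clamped to the start of the trace
}
```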

View File

@@ -698,7 +698,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), settings, prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), settings, prometheus.Config{}, telemetryStore),
"",
time.Second,
nil,

View File

@@ -253,7 +253,7 @@ func TestManager_TestNotification_SendUnmatched_PromRule(t *testing.T) {
WillReturnRows(samplesRows)
// Create Prometheus provider for this test
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, store)
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, store)
},
ManagerOptionsHook: func(opts *ManagerOptions) {
// Set Prometheus provider for PromQL queries

View File

@@ -99,7 +99,7 @@ func NewTestManager(t *testing.T, testOpts *TestManagerOptions) *Manager {
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
providerSettings := instrumentationtest.New().ToProviderSettings()
prometheus := prometheustest.New(context.Background(), providerSettings, prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
prometheus := prometheustest.New(context.Background(), providerSettings, prometheus.Config{}, telemetryStore)
reader := clickhouseReader.NewReader(
instrumentationtest.New().Logger(),
nil,

View File

@@ -940,7 +940,7 @@ func TestPromRuleUnitCombinations(t *testing.T) {
).
WillReturnRows(samplesRows)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
postableRule.RuleCondition.CompareOp = ruletypes.CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
@@ -1061,7 +1061,7 @@ func _Enable_this_after_9146_issue_fix_is_merged_TestPromRuleNoData(t *testing.T
WithArgs("test_metric", "__name__", "test_metric").
WillReturnRows(fingerprintRows)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
var target float64 = 0
postableRule.RuleCondition.Thresholds = &ruletypes.RuleThresholdData{
@@ -1281,7 +1281,7 @@ func TestMultipleThresholdPromRule(t *testing.T) {
).
WillReturnRows(samplesRows)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
postableRule.RuleCondition.CompareOp = ruletypes.CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
@@ -1441,7 +1441,7 @@ func TestPromRule_NoData(t *testing.T) {
promProvider := prometheustest.New(
context.Background(),
instrumentationtest.New().ToProviderSettings(),
prometheus.Config{Timeout: 2 * time.Minute},
prometheus.Config{},
telemetryStore,
)
defer func() {
@@ -1590,7 +1590,7 @@ func TestPromRule_NoData_AbsentFor(t *testing.T) {
promProvider := prometheustest.New(
context.Background(),
instrumentationtest.New().ToProviderSettings(),
prometheus.Config{Timeout: 2 * time.Minute},
prometheus.Config{},
telemetryStore,
)
defer func() {
@@ -1748,7 +1748,7 @@ func TestPromRuleEval_RequireMinPoints(t *testing.T) {
promProvider := prometheustest.New(
context.Background(),
instrumentationtest.New().ToProviderSettings(),
prometheus.Config{Timeout: 2 * time.Minute, LookbackDelta: lookBackDelta},
prometheus.Config{LookbackDelta: lookBackDelta},
telemetryStore,
)
defer func() {

View File

@@ -779,7 +779,7 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
},
)
require.NoError(t, err)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
@@ -894,7 +894,7 @@ func TestThresholdRuleNoData(t *testing.T) {
)
assert.NoError(t, err)
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@@ -1014,7 +1014,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
}
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@@ -1151,7 +1151,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
}
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@@ -1418,7 +1418,7 @@ func TestMultipleThresholdRule(t *testing.T) {
},
)
require.NoError(t, err)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Second, nil, readerCache, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Second, nil, readerCache, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
@@ -2220,7 +2220,7 @@ func TestThresholdEval_RequireMinPoints(t *testing.T) {
)
require.NoError(t, err)
prometheusProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
prometheusProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheusProvider, "", time.Second, nil, readerCache, options)
rule, err := NewThresholdRule("some-id", valuer.GenerateUUID(), &postableRule, reader, nil, logger)