Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-15 13:42:35 +00:00)

Compare commits: 2 commits, `fix-issues` ... `remove-v4-`

| Author | SHA1 | Date |
|---|---|---|
| | 25e0c19ddb | |
| | 8a6abb2f09 | |
@@ -176,6 +176,25 @@ We have published benchmarks comparing Loki with SigNoz. See…

We ❤️ contributions to the project, big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first, before you start contributing to SigNoz.

Not sure how to get started? Just drop us a message on the #contributing channel in our [slack community](https://signoz.io/slack)

### Our project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation
README.md (28 changed lines)
@@ -221,6 +221,34 @@ We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING

Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)

#### Frontend

- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)
- [Vibhu Pandey](https://github.com/therealpandey)

<br /><br />
@@ -187,6 +187,25 @@ Jaeger is only a distributed tracing system, whereas SigNoz also provides metric…

Not sure how to get started? Just reach out to us on the `#contributing` channel in our [slack community](https://signoz.io/slack).

### Project maintainers

#### Backend

- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)

#### Frontend

- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)

#### DevOps

- [Prashant Shahi](https://github.com/prashant-shahi)

<br /><br />

## Documentation
@@ -285,6 +285,7 @@ flagger:
  config:
    boolean:
      use_span_metrics: true
      interpolation_enabled: false
      kafka_span_eval: false
    string:
    float:
@@ -5,23 +5,16 @@ import (
	"encoding/json"
	"fmt"
	"log/slog"
	"math"
	"strings"
	"sync"
	"time"

	"github.com/SigNoz/signoz/ee/query-service/anomaly"
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/query-service/common"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/transition"
	"github.com/SigNoz/signoz/pkg/types/ruletypes"
	"github.com/SigNoz/signoz/pkg/valuer"

	querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
	"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
	"github.com/SigNoz/signoz/pkg/query-service/utils/times"
	"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
@@ -32,6 +25,8 @@ import (

	querierV5 "github.com/SigNoz/signoz/pkg/querier"

	"github.com/SigNoz/signoz/ee/query-service/anomaly"

	anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
@@ -48,17 +43,12 @@ type AnomalyRule struct {

	reader interfaces.Reader

	// querierV2 is used for alerts created after the introduction of new metrics query builder
	querierV2 interfaces.Querier

	// querierV5 is used for alerts migrated after the introduction of new query builder
	// querierV5 is the query builder v5 querier used for all alert rule evaluation
	querierV5 querierV5.Querier

	provider   anomaly.Provider
	providerV2 anomalyV2.Provider

	version string
	logger *slog.Logger
	logger *slog.Logger

	seasonality anomaly.Seasonality
}
@@ -102,34 +92,6 @@ func NewAnomalyRule(
|
||||
|
||||
logger.Info("using seasonality", "seasonality", t.seasonality.String())
|
||||
|
||||
querierOptsV2 := querierV2.QuerierOptions{
|
||||
Reader: reader,
|
||||
Cache: cache,
|
||||
KeyGenerator: queryBuilder.NewKeyGenerator(),
|
||||
}
|
||||
|
||||
t.querierV2 = querierV2.NewQuerier(querierOptsV2)
|
||||
t.reader = reader
|
||||
if t.seasonality == anomaly.SeasonalityHourly {
|
||||
t.provider = anomaly.NewHourlyProvider(
|
||||
anomaly.WithCache[*anomaly.HourlyProvider](cache),
|
||||
anomaly.WithKeyGenerator[*anomaly.HourlyProvider](queryBuilder.NewKeyGenerator()),
|
||||
anomaly.WithReader[*anomaly.HourlyProvider](reader),
|
||||
)
|
||||
} else if t.seasonality == anomaly.SeasonalityDaily {
|
||||
t.provider = anomaly.NewDailyProvider(
|
||||
anomaly.WithCache[*anomaly.DailyProvider](cache),
|
||||
anomaly.WithKeyGenerator[*anomaly.DailyProvider](queryBuilder.NewKeyGenerator()),
|
||||
anomaly.WithReader[*anomaly.DailyProvider](reader),
|
||||
)
|
||||
} else if t.seasonality == anomaly.SeasonalityWeekly {
|
||||
t.provider = anomaly.NewWeeklyProvider(
|
||||
anomaly.WithCache[*anomaly.WeeklyProvider](cache),
|
||||
anomaly.WithKeyGenerator[*anomaly.WeeklyProvider](queryBuilder.NewKeyGenerator()),
|
||||
anomaly.WithReader[*anomaly.WeeklyProvider](reader),
|
||||
)
|
||||
}
|
||||
|
||||
if t.seasonality == anomaly.SeasonalityHourly {
|
||||
t.providerV2 = anomalyV2.NewHourlyProvider(
|
||||
anomalyV2.WithQuerier[*anomalyV2.HourlyProvider](querierV5),
|
||||
@@ -148,7 +110,7 @@ func NewAnomalyRule(
|
||||
}
|
||||
|
||||
t.querierV5 = querierV5
|
||||
t.version = p.Version
|
||||
t.reader = reader
|
||||
t.logger = logger
|
||||
return &t, nil
|
||||
}
|
||||
@@ -157,36 +119,9 @@ func (r *AnomalyRule) Type() ruletypes.RuleType {
|
||||
return RuleTypeAnomaly
|
||||
}
|
||||
|
||||
func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {
|
||||
func (r *AnomalyRule) prepareQueryRange(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
|
||||
|
||||
r.logger.InfoContext(
|
||||
ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds(),
|
||||
)
|
||||
|
||||
st, en := r.Timestamps(ts)
|
||||
start := st.UnixMilli()
|
||||
end := en.UnixMilli()
|
||||
|
||||
compositeQuery := r.Condition().CompositeQuery
|
||||
|
||||
if compositeQuery.PanelType != v3.PanelTypeGraph {
|
||||
compositeQuery.PanelType = v3.PanelTypeGraph
|
||||
}
|
||||
|
||||
// default mode
|
||||
return &v3.QueryRangeParamsV3{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
|
||||
CompositeQuery: compositeQuery,
|
||||
Variables: make(map[string]interface{}, 0),
|
||||
NoCache: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *AnomalyRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
|
||||
|
||||
r.logger.InfoContext(ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds())
|
||||
r.logger.InfoContext(ctx, "prepare query range request", "ts", ts.UnixMilli(), "eval_window", r.EvalWindow().Milliseconds(), "eval_delay", r.EvalDelay().Milliseconds())
|
||||
|
||||
startTs, endTs := r.Timestamps(ts)
|
||||
start, end := startTs.UnixMilli(), endTs.UnixMilli()
|
||||
@@ -215,60 +150,6 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = r.PopulateTemporality(ctx, orgID, params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("internal error while setting temporality")
|
||||
}
|
||||
|
||||
anomalies, err := r.provider.GetAnomalies(ctx, orgID, &anomaly.GetAnomaliesRequest{
|
||||
Params: params,
|
||||
Seasonality: r.seasonality,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var queryResult *v3.Result
|
||||
for _, result := range anomalies.Results {
|
||||
if result.QueryName == r.GetSelectedQuery() {
|
||||
queryResult = result
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
hasData := len(queryResult.AnomalyScores) > 0
|
||||
if missingDataAlert := r.HandleMissingDataAlert(ctx, ts, hasData); missingDataAlert != nil {
|
||||
return ruletypes.Vector{*missingDataAlert}, nil
|
||||
}
|
||||
|
||||
var resultVector ruletypes.Vector
|
||||
|
||||
scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
|
||||
r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
|
||||
|
||||
for _, series := range queryResult.AnomalyScores {
|
||||
if !r.Condition().ShouldEval(series) {
|
||||
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||
continue
|
||||
}
|
||||
results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
|
||||
ActiveAlerts: r.ActiveAlertsLabelFP(),
|
||||
SendUnmatched: r.ShouldSendUnmatched(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resultVector = append(resultVector, results...)
|
||||
}
|
||||
return resultVector, nil
|
||||
}
|
||||
|
||||
func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
|
||||
|
||||
params, err := r.prepareQueryRangeV5(ctx, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
anomalies, err := r.providerV2.GetAnomalies(ctx, orgID, &anomalyV2.AnomaliesRequest{
|
||||
Params: *params,
|
||||
@@ -290,20 +171,25 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
|
||||
r.logger.WarnContext(ctx, "nil qb result", "ts", ts.UnixMilli())
|
||||
}
|
||||
|
||||
queryResult := transition.ConvertV5TimeSeriesDataToV4Result(qbResult)
|
||||
var anomalyScores []*qbtypes.TimeSeries
|
||||
if qbResult != nil {
|
||||
for _, bucket := range qbResult.Aggregations {
|
||||
anomalyScores = append(anomalyScores, bucket.AnomalyScores...)
|
||||
}
|
||||
}
|
||||
|
||||
hasData := len(queryResult.AnomalyScores) > 0
|
||||
hasData := len(anomalyScores) > 0
|
||||
if missingDataAlert := r.HandleMissingDataAlert(ctx, ts, hasData); missingDataAlert != nil {
|
||||
return ruletypes.Vector{*missingDataAlert}, nil
|
||||
}
|
||||
|
||||
var resultVector ruletypes.Vector
|
||||
|
||||
scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
|
||||
scoresJSON, _ := json.Marshal(anomalyScores)
|
||||
r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))
|
||||
|
||||
// Filter out new series if newGroupEvalDelay is configured
|
||||
seriesToProcess := queryResult.AnomalyScores
|
||||
seriesToProcess := anomalyScores
|
||||
if r.ShouldSkipNewGroups() {
|
||||
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
|
||||
// In case of error we log the error and continue with the original series
|
||||
@@ -316,7 +202,7 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
|
||||
|
||||
for _, series := range seriesToProcess {
|
||||
if !r.Condition().ShouldEval(series) {
|
||||
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Values), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||
continue
|
||||
}
|
||||
results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
|
||||
@@ -337,16 +223,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (int, error) {
|
||||
|
||||
valueFormatter := formatter.FromUnit(r.Unit())
|
||||
|
||||
var res ruletypes.Vector
|
||||
var err error
|
||||
|
||||
if r.version == "v5" {
|
||||
r.logger.InfoContext(ctx, "running v5 query")
|
||||
res, err = r.buildAndRunQueryV5(ctx, r.OrgID(), ts)
|
||||
} else {
|
||||
r.logger.InfoContext(ctx, "running v4 query")
|
||||
res, err = r.buildAndRunQuery(ctx, r.OrgID(), ts)
|
||||
}
|
||||
res, err := r.buildAndRunQuery(ctx, r.OrgID(), ts)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -8,28 +8,27 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/SigNoz/signoz/ee/query-service/anomaly"
|
||||
anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
|
||||
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/ruletypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
// mockAnomalyProvider is a mock implementation of anomaly.Provider for testing.
// We need this because the anomaly provider makes 6 different queries for various
// time periods (current, past period, current season, past season, past 2 seasons,
// past 3 seasons), making it cumbersome to create mock data.
type mockAnomalyProvider struct {
	responses []*anomaly.GetAnomaliesResponse
// mockAnomalyProviderV2 is a mock implementation of anomalyV2.Provider for testing.
type mockAnomalyProviderV2 struct {
	responses []*anomalyV2.AnomaliesResponse
	callCount int
}

func (m *mockAnomalyProvider) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *anomaly.GetAnomaliesRequest) (*anomaly.GetAnomaliesResponse, error) {
func (m *mockAnomalyProviderV2) GetAnomalies(ctx context.Context, orgID valuer.UUID, req *anomalyV2.AnomaliesRequest) (*anomalyV2.AnomaliesResponse, error) {
	if m.callCount >= len(m.responses) {
		return &anomaly.GetAnomaliesResponse{Results: []*v3.Result{}}, nil
		return &anomalyV2.AnomaliesResponse{Results: []*qbtypes.TimeSeriesData{}}, nil
	}
	resp := m.responses[m.callCount]
	m.callCount++
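For orientation, a minimal sketch of how the tests below wire this mock into a rule; `rule`, `responseWithData`, and `responseNoData` refer to values constructed in those tests, not new API:

```go
// Sketch only — names come from the tests below.
// Each Eval consumes the next canned response; once the slice is exhausted the
// mock returns an empty AnomaliesResponse, which the rule treats as "no data".
rule.providerV2 = &mockAnomalyProviderV2{
	responses: []*anomalyV2.AnomaliesResponse{responseWithData, responseNoData},
}
```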
@@ -82,11 +81,11 @@ func TestAnomalyRule_NoData_AlertOnAbsent(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
responseNoData := &anomaly.GetAnomaliesResponse{
|
||||
Results: []*v3.Result{
|
||||
responseNoData := &anomalyV2.AnomaliesResponse{
|
||||
Results: []*qbtypes.TimeSeriesData{
|
||||
{
|
||||
QueryName: "A",
|
||||
AnomalyScores: []*v3.Series{},
|
||||
QueryName: "A",
|
||||
Aggregations: []*qbtypes.AggregationBucket{},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -129,8 +128,8 @@ func TestAnomalyRule_NoData_AlertOnAbsent(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
rule.provider = &mockAnomalyProvider{
|
||||
responses: []*anomaly.GetAnomaliesResponse{responseNoData},
|
||||
rule.providerV2 = &mockAnomalyProviderV2{
|
||||
responses: []*anomalyV2.AnomaliesResponse{responseNoData},
|
||||
}
|
||||
|
||||
alertsFound, err := rule.Eval(context.Background(), evalTime)
|
||||
@@ -190,11 +189,11 @@ func TestAnomalyRule_NoData_AbsentFor(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
responseNoData := &anomaly.GetAnomaliesResponse{
|
||||
Results: []*v3.Result{
|
||||
responseNoData := &anomalyV2.AnomaliesResponse{
|
||||
Results: []*qbtypes.TimeSeriesData{
|
||||
{
|
||||
QueryName: "A",
|
||||
AnomalyScores: []*v3.Series{},
|
||||
QueryName: "A",
|
||||
Aggregations: []*qbtypes.AggregationBucket{},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -228,16 +227,22 @@ func TestAnomalyRule_NoData_AbsentFor(t *testing.T) {
|
||||
t1 := baseTime.Add(5 * time.Minute)
|
||||
t2 := t1.Add(c.timeBetweenEvals)
|
||||
|
||||
responseWithData := &anomaly.GetAnomaliesResponse{
|
||||
Results: []*v3.Result{
|
||||
responseWithData := &anomalyV2.AnomaliesResponse{
|
||||
Results: []*qbtypes.TimeSeriesData{
|
||||
{
|
||||
QueryName: "A",
|
||||
AnomalyScores: []*v3.Series{
|
||||
Aggregations: []*qbtypes.AggregationBucket{
|
||||
{
|
||||
Labels: map[string]string{"test": "label"},
|
||||
Points: []v3.Point{
|
||||
{Timestamp: baseTime.UnixMilli(), Value: 1.0},
|
||||
{Timestamp: baseTime.Add(time.Minute).UnixMilli(), Value: 1.5},
|
||||
AnomalyScores: []*qbtypes.TimeSeries{
|
||||
{
|
||||
Labels: []*qbtypes.Label{
|
||||
{Key: telemetrytypes.TelemetryFieldKey{Name: "test"}, Value: "label"},
|
||||
},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: baseTime.UnixMilli(), Value: 1.0},
|
||||
{Timestamp: baseTime.Add(time.Minute).UnixMilli(), Value: 1.5},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -252,8 +257,8 @@ func TestAnomalyRule_NoData_AbsentFor(t *testing.T) {
|
||||
rule, err := NewAnomalyRule("test-anomaly-rule", valuer.GenerateUUID(), &postableRule, reader, nil, logger, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rule.provider = &mockAnomalyProvider{
|
||||
responses: []*anomaly.GetAnomaliesResponse{responseWithData, responseNoData},
|
||||
rule.providerV2 = &mockAnomalyProviderV2{
|
||||
responses: []*anomalyV2.AnomaliesResponse{responseWithData, responseNoData},
|
||||
}
|
||||
|
||||
alertsFound1, err := rule.Eval(context.Background(), t1)
|
||||
|
||||
@@ -6,7 +6,7 @@ import logEvent from 'api/common/logEvent';
import PromQLIcon from 'assets/Dashboard/PromQl';
import { QueryBuilderV2 } from 'components/QueryBuilderV2/QueryBuilderV2';
import { ALERTS_DATA_SOURCE_MAP } from 'constants/alerts';
import { ENTITY_VERSION_V4 } from 'constants/app';
import { ENTITY_VERSION_V5 } from 'constants/app';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { QBShortcuts } from 'constants/shortcuts/QBShortcuts';
import RunQueryBtn from 'container/QueryBuilder/components/RunQueryBtn/RunQueryBtn';
@@ -63,12 +63,8 @@ function QuerySection({
					signalSource: signalSource === 'meter' ? 'meter' : '',
				}}
				showTraceOperator={alertType === AlertTypes.TRACES_BASED_ALERT}
				showFunctions={
					(alertType === AlertTypes.METRICS_BASED_ALERT &&
						alertDef.version === ENTITY_VERSION_V4) ||
					alertType === AlertTypes.LOGS_BASED_ALERT
				}
				version={alertDef.version || 'v3'}
				showFunctions
				version={ENTITY_VERSION_V5}
				onSignalSourceChange={handleSignalSourceChange}
				signalSourceChangeEnabled
			/>
@@ -3,8 +3,9 @@ package flagger
import "github.com/SigNoz/signoz/pkg/types/featuretypes"

var (
	FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
	FeatureKafkaSpanEval  = featuretypes.MustNewName("kafka_span_eval")
	FeatureUseSpanMetrics       = featuretypes.MustNewName("use_span_metrics")
	FeatureInterpolationEnabled = featuretypes.MustNewName("interpolation_enabled")
	FeatureKafkaSpanEval        = featuretypes.MustNewName("kafka_span_eval")
)

func MustNewRegistry() featuretypes.Registry {
@@ -17,6 +18,14 @@ func MustNewRegistry() featuretypes.Registry {
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants:       featuretypes.NewBooleanVariants(),
		},
		&featuretypes.Feature{
			Name:           FeatureInterpolationEnabled,
			Kind:           featuretypes.KindBoolean,
			Stage:          featuretypes.StageExperimental,
			Description:    "Controls whether to enable interpolation",
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants:       featuretypes.NewBooleanVariants(),
		},
		&featuretypes.Feature{
			Name: FeatureKafkaSpanEval,
			Kind: featuretypes.KindBoolean,
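The new `interpolation_enabled` entry follows the registry's existing boolean-feature pattern, and its default corresponds to the `boolean:` block of the flagger config shown earlier in this change. A hedged sketch of the same pattern for a hypothetical flag (`my_new_flag` is illustrative only, not part of this change):

```go
package flagger

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

// FeatureMyNewFlag is a hypothetical flag used only to illustrate the pattern.
var FeatureMyNewFlag = featuretypes.MustNewName("my_new_flag")

// newFlagEntry shows the registry entry such a flag would need, mirroring
// the interpolation_enabled entry added above.
func newFlagEntry() *featuretypes.Feature {
	return &featuretypes.Feature{
		Name:           FeatureMyNewFlag,
		Kind:           featuretypes.KindBoolean,
		Stage:          featuretypes.StageExperimental,
		Description:    "Describes what the flag controls",
		DefaultVariant: featuretypes.MustNewName("disabled"),
		Variants:       featuretypes.NewBooleanVariants(),
	}
}
```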
@@ -625,7 +625,7 @@ func (r *BaseRule) extractMetricAndGroupBys(ctx context.Context) (map[string][]s

// FilterNewSeries filters out items that are too new based on metadata first_seen timestamps.
// Returns the filtered series (old ones) excluding new series that are still within the grace period.
func (r *BaseRule) FilterNewSeries(ctx context.Context, ts time.Time, series []*v3.Series) ([]*v3.Series, error) {
func (r *BaseRule) FilterNewSeries(ctx context.Context, ts time.Time, series []*qbtypes.TimeSeries) ([]*qbtypes.TimeSeries, error) {
	// Extract metric names and groupBy keys
	metricToGroupedFields, err := r.extractMetricAndGroupBys(ctx)
	if err != nil {
@@ -642,7 +642,7 @@ func (r *BaseRule) FilterNewSeries(ctx context.Context, ts time.Time, series []*
	seriesIdxToLookupKeys := make(map[int][]telemetrytypes.MetricMetadataLookupKey) // series index -> lookup keys

	for i := 0; i < len(series); i++ {
		metricLabelMap := series[i].Labels
		metricLabelMap := series[i].LabelsMap()

		// Collect groupBy attribute-value pairs for this series
		seriesKeys := make([]telemetrytypes.MetricMetadataLookupKey, 0)
@@ -689,7 +689,7 @@ func (r *BaseRule) FilterNewSeries(ctx context.Context, ts time.Time, series []*
	}

	// Filter series based on first_seen + delay
	filteredSeries := make([]*v3.Series, 0, len(series))
	filteredSeries := make([]*qbtypes.TimeSeries, 0, len(series))
	evalTimeMs := ts.UnixMilli()
	newGroupEvalDelayMs := r.newGroupEvalDelay.Milliseconds()

@@ -727,7 +727,7 @@ func (r *BaseRule) FilterNewSeries(ctx context.Context, ts time.Time, series []*
		// Check if first_seen + delay has passed
		if maxFirstSeen+newGroupEvalDelayMs > evalTimeMs {
			// Still within grace period, skip this series
			r.logger.InfoContext(ctx, "Skipping new series", "rule_name", r.Name(), "series_idx", i, "max_first_seen", maxFirstSeen, "eval_time_ms", evalTimeMs, "delay_ms", newGroupEvalDelayMs, "labels", series[i].Labels)
			r.logger.InfoContext(ctx, "Skipping new series", "rule_name", r.Name(), "series_idx", i, "max_first_seen", maxFirstSeen, "eval_time_ms", evalTimeMs, "delay_ms", newGroupEvalDelayMs, "labels", series[i].LabelsMap())
			continue
		}
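The heart of the filter is the grace-period check in the last hunk above. A self-contained sketch with hypothetical timestamps (the real code derives these values from the rule's `newGroupEvalDelay` and the metric metadata's first_seen entries):

```go
package main

import "fmt"

// withinGracePeriod mirrors the check above: a series is still "new" while
// first_seen + newGroupEvalDelay has not yet passed at evaluation time.
func withinGracePeriod(maxFirstSeenMs, newGroupEvalDelayMs, evalTimeMs int64) bool {
	return maxFirstSeenMs+newGroupEvalDelayMs > evalTimeMs
}

func main() {
	evalTime := int64(1_700_000_600_000) // hypothetical evaluation time (ms)
	delay := int64(5 * 60 * 1000)        // 5-minute newGroupEvalDelay

	// First seen 2 minutes ago: still in the grace period, so it is skipped.
	fmt.Println(withinGracePeriod(evalTime-2*60*1000, delay, evalTime)) // true
	// First seen 10 minutes ago: old enough, so it is evaluated.
	fmt.Println(withinGracePeriod(evalTime-10*60*1000, delay, evalTime)) // false
}
```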
@@ -26,33 +26,33 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
// createTestSeries creates a *v3.Series with the given labels and optional points
|
||||
// createTestSeries creates a *qbtypes.TimeSeries with the given labels and optional values
|
||||
// so we don't exactly need the points in the series because the labels are used to determine if the series is new or old
|
||||
// we use the labels to create a lookup key for the series and then check the first_seen timestamp for the series in the metadata table
|
||||
func createTestSeries(labels map[string]string, points []v3.Point) *v3.Series {
|
||||
func createTestSeries(labels map[string]string, points []*qbtypes.TimeSeriesValue) *qbtypes.TimeSeries {
|
||||
if points == nil {
|
||||
points = []v3.Point{}
|
||||
points = []*qbtypes.TimeSeriesValue{}
|
||||
}
|
||||
return &v3.Series{
|
||||
Labels: labels,
|
||||
Points: points,
|
||||
lbls := make([]*qbtypes.Label, 0, len(labels))
|
||||
for k, v := range labels {
|
||||
lbls = append(lbls, &qbtypes.Label{Key: telemetrytypes.TelemetryFieldKey{Name: k}, Value: v})
|
||||
}
|
||||
return &qbtypes.TimeSeries{
|
||||
Labels: lbls,
|
||||
Values: points,
|
||||
}
|
||||
}
|
||||
|
||||
// seriesEqual compares two v3.Series by their labels
|
||||
// seriesEqual compares two *qbtypes.TimeSeries by their labels
|
||||
// Returns true if the series have the same labels (order doesn't matter)
|
||||
func seriesEqual(s1, s2 *v3.Series) bool {
|
||||
if s1 == nil && s2 == nil {
|
||||
return true
|
||||
}
|
||||
if s1 == nil || s2 == nil {
|
||||
func seriesEqual(s1, s2 *qbtypes.TimeSeries) bool {
|
||||
m1 := s1.LabelsMap()
|
||||
m2 := s2.LabelsMap()
|
||||
if len(m1) != len(m2) {
|
||||
return false
|
||||
}
|
||||
if len(s1.Labels) != len(s2.Labels) {
|
||||
return false
|
||||
}
|
||||
for k, v := range s1.Labels {
|
||||
if s2.Labels[k] != v {
|
||||
for k, v := range m1 {
|
||||
if m2[k] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
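As a quick illustration of the migrated helpers, a hedged test sketch that relies only on `createTestSeries`, `seriesEqual`, and `LabelsMap()` as defined above:

```go
// Sketch of a possible test relying on the helpers in this file.
func TestCreateTestSeriesLabelsRoundTrip(t *testing.T) {
	s := createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil)

	// Labels passed in as a map come back out of LabelsMap() unchanged.
	assert.Equal(t, "svc1", s.LabelsMap()["service_name"])

	// seriesEqual compares by labels, so map key order does not matter.
	assert.True(t, seriesEqual(s, createTestSeries(map[string]string{"env": "prod", "service_name": "svc1"}, nil)))
}
```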
@@ -149,11 +149,11 @@ func createPostableRule(compositeQuery *v3.CompositeQuery) ruletypes.PostableRul
|
||||
type filterNewSeriesTestCase struct {
|
||||
name string
|
||||
compositeQuery *v3.CompositeQuery
|
||||
series []*v3.Series
|
||||
series []*qbtypes.TimeSeries
|
||||
firstSeenMap map[telemetrytypes.MetricMetadataLookupKey]int64
|
||||
newGroupEvalDelay valuer.TextDuration
|
||||
evalTime time.Time
|
||||
expectedFiltered []*v3.Series // series that should be in the final filtered result (old enough)
|
||||
expectedFiltered []*qbtypes.TimeSeries // series that should be in the final filtered result (old enough)
|
||||
expectError bool
|
||||
}
|
||||
|
||||
@@ -193,7 +193,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-old", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-new", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-missing", "env": "stage"}, nil),
|
||||
@@ -205,7 +205,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-old", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-missing", "env": "stage"}, nil),
|
||||
}, // svc-old and svc-missing should be included; svc-new is filtered out
|
||||
@@ -227,7 +227,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-new1", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-new2", "env": "stage"}, nil),
|
||||
},
|
||||
@@ -237,7 +237,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{}, // all should be filtered out (new series)
|
||||
expectedFiltered: []*qbtypes.TimeSeries{}, // all should be filtered out (new series)
|
||||
},
|
||||
{
|
||||
name: "all old series - ClickHouse query",
|
||||
@@ -254,7 +254,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-old1", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-old2", "env": "stage"}, nil),
|
||||
},
|
||||
@@ -264,7 +264,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-old1", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-old2", "env": "stage"}, nil),
|
||||
}, // all should be included (old series)
|
||||
@@ -292,13 +292,13 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
},
|
||||
firstSeenMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
}, // early return, no filtering - all series included
|
||||
},
|
||||
@@ -322,13 +322,13 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
},
|
||||
firstSeenMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
}, // early return, no filtering - all series included
|
||||
},
|
||||
@@ -358,13 +358,13 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"status": "200"}, nil), // no service_name or env
|
||||
},
|
||||
firstSeenMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"status": "200"}, nil),
|
||||
}, // series included as we can't decide if it's new or old
|
||||
},
|
||||
@@ -385,7 +385,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-old", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-no-metadata", "env": "prod"}, nil),
|
||||
},
|
||||
@@ -393,7 +393,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
// svc-no-metadata has no entry in firstSeenMap
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-old", "env": "prod"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc-no-metadata", "env": "prod"}, nil),
|
||||
}, // both should be included - svc-old is old, svc-no-metadata can't be decided
|
||||
@@ -413,7 +413,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-partial", "env": "prod"}, nil),
|
||||
},
|
||||
// Only provide metadata for service_name, not env
|
||||
@@ -423,7 +423,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc-partial", "env": "prod"}, nil),
|
||||
}, // has some metadata, uses max first_seen which is old
|
||||
},
|
||||
@@ -453,11 +453,11 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{},
|
||||
series: []*qbtypes.TimeSeries{},
|
||||
firstSeenMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{},
|
||||
expectedFiltered: []*qbtypes.TimeSeries{},
|
||||
},
|
||||
{
|
||||
name: "zero delay - Builder",
|
||||
@@ -485,13 +485,13 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
},
|
||||
firstSeenMap: createFirstSeenMap("request_total", defaultGroupByFields, defaultEvalTime, defaultDelay, true, "svc1", "prod"),
|
||||
newGroupEvalDelay: valuer.TextDuration{}, // zero delay
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
}, // with zero delay, all series pass
|
||||
},
|
||||
@@ -526,7 +526,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
},
|
||||
firstSeenMap: mergeFirstSeenMaps(
|
||||
@@ -535,7 +535,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
},
|
||||
},
|
||||
@@ -565,7 +565,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1", "env": "prod"}, nil),
|
||||
},
|
||||
// service_name is old, env is new - should use max (new)
|
||||
@@ -575,7 +575,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{}, // max first_seen is new, so should be filtered out
|
||||
expectedFiltered: []*qbtypes.TimeSeries{}, // max first_seen is new, so should be filtered out
|
||||
},
|
||||
{
|
||||
name: "Logs query - should skip filtering and return empty skip indexes",
|
||||
@@ -600,14 +600,14 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc2"}, nil),
|
||||
},
|
||||
firstSeenMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc2"}, nil),
|
||||
}, // Logs queries should return early, no filtering - all included
|
||||
@@ -635,14 +635,14 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
series: []*v3.Series{
|
||||
series: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc2"}, nil),
|
||||
},
|
||||
firstSeenMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||
newGroupEvalDelay: defaultNewGroupEvalDelay,
|
||||
evalTime: defaultEvalTime,
|
||||
expectedFiltered: []*v3.Series{
|
||||
expectedFiltered: []*qbtypes.TimeSeries{
|
||||
createTestSeries(map[string]string{"service_name": "svc1"}, nil),
|
||||
createTestSeries(map[string]string{"service_name": "svc2"}, nil),
|
||||
}, // Traces queries should return early, no filtering - all included
|
||||
@@ -724,14 +724,14 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
|
||||
// Build a map to count occurrences of each unique label combination in expected series
|
||||
expectedCounts := make(map[string]int)
|
||||
for _, expected := range tt.expectedFiltered {
|
||||
key := labelsKey(expected.Labels)
|
||||
key := labelsKey(expected.LabelsMap())
|
||||
expectedCounts[key]++
|
||||
}
|
||||
|
||||
// Build a map to count occurrences of each unique label combination in filtered series
|
||||
actualCounts := make(map[string]int)
|
||||
for _, filtered := range filteredSeries {
|
||||
key := labelsKey(filtered.Labels)
|
||||
key := labelsKey(filtered.LabelsMap())
|
||||
actualCounts[key]++
|
||||
}
|
||||
|
||||
|
||||
@@ -12,19 +12,18 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/formatter"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
qslabels "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/times"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/ruletypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
)
|
||||
|
||||
type PromRule struct {
|
||||
*BaseRule
|
||||
version string
|
||||
prometheus prometheus.Prometheus
|
||||
}
|
||||
|
||||
@@ -48,7 +47,6 @@ func NewPromRule(
|
||||
|
||||
p := PromRule{
|
||||
BaseRule: baseRule,
|
||||
version: postableRule.Version,
|
||||
prometheus: prometheus,
|
||||
}
|
||||
p.logger = logger
|
||||
@@ -83,48 +81,30 @@ func (r *PromRule) GetSelectedQuery() string {
|
||||
}
|
||||
|
||||
func (r *PromRule) getPqlQuery() (string, error) {
|
||||
if r.version == "v5" {
|
||||
if len(r.ruleCondition.CompositeQuery.Queries) > 0 {
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
for _, item := range r.ruleCondition.CompositeQuery.Queries {
|
||||
switch item.Type {
|
||||
case qbtypes.QueryTypePromQL:
|
||||
promQuery, ok := item.Spec.(qbtypes.PromQuery)
|
||||
if !ok {
|
||||
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid promql query spec %T", item.Spec)
|
||||
}
|
||||
if promQuery.Name == selectedQuery {
|
||||
return promQuery.Query, nil
|
||||
}
|
||||
if len(r.ruleCondition.CompositeQuery.Queries) > 0 {
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
for _, item := range r.ruleCondition.CompositeQuery.Queries {
|
||||
switch item.Type {
|
||||
case qbtypes.QueryTypePromQL:
|
||||
promQuery, ok := item.Spec.(qbtypes.PromQuery)
|
||||
if !ok {
|
||||
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid promql query spec %T", item.Spec)
|
||||
}
|
||||
if promQuery.Name == selectedQuery {
|
||||
return promQuery.Query, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("invalid promql rule setup")
|
||||
}
|
||||
|
||||
if r.ruleCondition.CompositeQuery.QueryType == v3.QueryTypePromQL {
|
||||
if len(r.ruleCondition.CompositeQuery.PromQueries) > 0 {
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
if promQuery, ok := r.ruleCondition.CompositeQuery.PromQueries[selectedQuery]; ok {
|
||||
query := promQuery.Query
|
||||
if query == "" {
|
||||
return query, fmt.Errorf("a promquery needs to be set for this rule to function")
|
||||
}
|
||||
return query, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("invalid promql rule query")
|
||||
return "", fmt.Errorf("invalid promql rule setup")
|
||||
}
|
||||
|
||||
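For context, the composite-query shape that `getPqlQuery` now resolves is the v5 envelope form; a hedged sketch mirroring how the tests later in this change build it (`dummy_query` is a placeholder, as it is there):

```go
// Sketch: a PromQL query envelope whose Spec carries the query name and expression.
cond := &ruletypes.RuleCondition{
	CompositeQuery: &v3.CompositeQuery{
		QueryType: v3.QueryTypePromQL,
		Queries: []qbtypes.QueryEnvelope{
			{
				Type: qbtypes.QueryTypePromQL,
				Spec: qbtypes.PromQuery{
					Name:  "A",
					Query: "dummy_query", // placeholder, as in the tests below
				},
			},
		},
	},
}
```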
func (r *PromRule) matrixToV3Series(res promql.Matrix) []*v3.Series {
|
||||
v3Series := make([]*v3.Series, 0, len(res))
|
||||
func matrixToTimeSeries(res promql.Matrix) []*qbtypes.TimeSeries {
|
||||
result := make([]*qbtypes.TimeSeries, 0, len(res))
|
||||
for _, series := range res {
|
||||
commonSeries := toCommonSeries(series)
|
||||
v3Series = append(v3Series, &commonSeries)
|
||||
result = append(result, promSeriesToTimeSeries(series))
|
||||
}
|
||||
return v3Series
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *PromRule) buildAndRunQuery(ctx context.Context, ts time.Time) (ruletypes.Vector, error) {
|
||||
@@ -143,31 +123,31 @@ func (r *PromRule) buildAndRunQuery(ctx context.Context, ts time.Time) (ruletype
|
||||
return nil, err
|
||||
}
|
||||
|
||||
matrixToProcess := r.matrixToV3Series(res)
|
||||
seriesToProcess := matrixToTimeSeries(res)
|
||||
|
||||
hasData := len(matrixToProcess) > 0
|
||||
hasData := len(seriesToProcess) > 0
|
||||
if missingDataAlert := r.HandleMissingDataAlert(ctx, ts, hasData); missingDataAlert != nil {
|
||||
return ruletypes.Vector{*missingDataAlert}, nil
|
||||
}
|
||||
|
||||
// Filter out new series if newGroupEvalDelay is configured
|
||||
if r.ShouldSkipNewGroups() {
|
||||
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, matrixToProcess)
|
||||
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
|
||||
// In case of error we log the error and continue with the original series
|
||||
if filterErr != nil {
|
||||
r.logger.ErrorContext(ctx, "Error filtering new series, ", "error", filterErr, "rule_name", r.Name())
|
||||
} else {
|
||||
matrixToProcess = filteredSeries
|
||||
seriesToProcess = filteredSeries
|
||||
}
|
||||
}
|
||||
|
||||
var resultVector ruletypes.Vector
|
||||
|
||||
for _, series := range matrixToProcess {
|
||||
for _, series := range seriesToProcess {
|
||||
if !r.Condition().ShouldEval(series) {
|
||||
r.logger.InfoContext(
|
||||
ctx, "not enough data points to evaluate series, skipping",
|
||||
"rule_id", r.ID(), "num_points", len(series.Points), "required_points", r.Condition().RequiredNumPoints,
|
||||
"rule_id", r.ID(), "num_points", len(series.Values), "required_points", r.Condition().RequiredNumPoints,
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -454,26 +434,25 @@ func (r *PromRule) RunAlertQuery(ctx context.Context, qs string, start, end time
	}
}

func toCommonSeries(series promql.Series) v3.Series {
	commonSeries := v3.Series{
		Labels:      make(map[string]string),
		LabelsArray: make([]map[string]string, 0),
		Points:      make([]v3.Point, 0),
func promSeriesToTimeSeries(series promql.Series) *qbtypes.TimeSeries {
	ts := &qbtypes.TimeSeries{
		Labels: make([]*qbtypes.Label, 0, len(series.Metric)),
		Values: make([]*qbtypes.TimeSeriesValue, 0, len(series.Floats)),
	}

	for _, lbl := range series.Metric {
		commonSeries.Labels[lbl.Name] = lbl.Value
		commonSeries.LabelsArray = append(commonSeries.LabelsArray, map[string]string{
			lbl.Name: lbl.Value,
		ts.Labels = append(ts.Labels, &qbtypes.Label{
			Key:   telemetrytypes.TelemetryFieldKey{Name: lbl.Name},
			Value: lbl.Value,
		})
	}

	for _, f := range series.Floats {
		commonSeries.Points = append(commonSeries.Points, v3.Point{
		ts.Values = append(ts.Values, &qbtypes.TimeSeriesValue{
			Timestamp: f.T,
			Value:     f.F,
		})
	}

	return commonSeries
	return ts
}
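A hedged sketch of what `promSeriesToTimeSeries` produces for a tiny input, assuming the Prometheus `model/labels` package is imported for `labels.FromStrings`; the field names follow the code above:

```go
// Sketch: one Prometheus series with a single label and two float samples.
in := promql.Series{
	Metric: labels.FromStrings("service_name", "svc1"),
	Floats: []promql.FPoint{{T: 1000, F: 1.0}, {T: 2000, F: 1.5}},
}

out := promSeriesToTimeSeries(in)
// out.Labels holds one qbtypes.Label ("service_name" -> "svc1") and
// out.Values holds two TimeSeriesValue entries mirroring T/F above.
fmt.Println(len(out.Labels), len(out.Values)) // 1 2
```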
@@ -20,6 +20,7 @@ import (
|
||||
qslabels "github.com/SigNoz/signoz/pkg/query-service/utils/labels"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/ruletypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
@@ -47,9 +48,13 @@ func TestPromRuleEval(t *testing.T) {
|
||||
RuleCondition: &ruletypes.RuleCondition{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {
|
||||
Query: "dummy_query", // This is not used in the test
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{
|
||||
Name: "A",
|
||||
Query: "dummy_query", // This is not used in the test
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -62,7 +67,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp string
|
||||
matchType string
|
||||
target float64
|
||||
expectedAlertSample v3.Point
|
||||
expectedAlertSample float64
|
||||
expectedVectorValues []float64 // Expected values in result vector
|
||||
}{
|
||||
// Test cases for Equals Always
|
||||
@@ -80,7 +85,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "3", // Equals
|
||||
matchType: "2", // Always
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 0.0},
|
||||
expectedAlertSample: 0.0,
|
||||
expectedVectorValues: []float64{0.0},
|
||||
},
|
||||
{
|
||||
@@ -145,7 +150,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "3", // Equals
|
||||
matchType: "1", // Once
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 0.0},
|
||||
expectedAlertSample: 0.0,
|
||||
expectedVectorValues: []float64{0.0},
|
||||
},
|
||||
{
|
||||
@@ -162,7 +167,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "3", // Equals
|
||||
matchType: "1", // Once
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 0.0},
|
||||
expectedAlertSample: 0.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -178,7 +183,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "3", // Equals
|
||||
matchType: "1", // Once
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 0.0},
|
||||
expectedAlertSample: 0.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -211,7 +216,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "1", // Greater Than
|
||||
matchType: "2", // Always
|
||||
target: 1.5,
|
||||
expectedAlertSample: v3.Point{Value: 2.0},
|
||||
expectedAlertSample: 2.0,
|
||||
expectedVectorValues: []float64{2.0},
|
||||
},
|
||||
{
|
||||
@@ -228,7 +233,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "1", // Above
|
||||
matchType: "2", // Always
|
||||
target: 2.0,
|
||||
expectedAlertSample: v3.Point{Value: 3.0},
|
||||
expectedAlertSample: 3.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -244,7 +249,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "2", // Below
|
||||
matchType: "2", // Always
|
||||
target: 13.0,
|
||||
expectedAlertSample: v3.Point{Value: 12.0},
|
||||
expectedAlertSample: 12.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -276,7 +281,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "1", // Greater Than
|
||||
matchType: "1", // Once
|
||||
target: 4.5,
|
||||
expectedAlertSample: v3.Point{Value: 10.0},
|
||||
expectedAlertSample: 10.0,
|
||||
expectedVectorValues: []float64{10.0},
|
||||
},
|
||||
{
|
||||
@@ -339,7 +344,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "4", // Not Equals
|
||||
matchType: "2", // Always
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 1.0},
|
||||
expectedAlertSample: 1.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -371,7 +376,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "4", // Not Equals
|
||||
matchType: "1", // Once
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 1.0},
|
||||
expectedAlertSample: 1.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -402,7 +407,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "4", // Not Equals
|
||||
matchType: "1", // Once
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 1.0},
|
||||
expectedAlertSample: 1.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -418,7 +423,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "4", // Not Equals
|
||||
matchType: "1", // Once
|
||||
target: 0.0,
|
||||
expectedAlertSample: v3.Point{Value: 1.0},
|
||||
expectedAlertSample: 1.0,
|
||||
},
|
||||
// Test cases for Less Than Always
|
||||
{
|
||||
@@ -435,7 +440,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "2", // Less Than
|
||||
matchType: "2", // Always
|
||||
target: 4,
|
||||
expectedAlertSample: v3.Point{Value: 1.5},
|
||||
expectedAlertSample: 1.5,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -467,7 +472,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "2", // Less Than
|
||||
matchType: "1", // Once
|
||||
target: 4,
|
||||
expectedAlertSample: v3.Point{Value: 2.5},
|
||||
expectedAlertSample: 2.5,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -499,7 +504,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "3", // Equals
|
||||
matchType: "3", // OnAverage
|
||||
target: 6.0,
|
||||
expectedAlertSample: v3.Point{Value: 6.0},
|
||||
expectedAlertSample: 6.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -530,7 +535,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "4", // Not Equals
|
||||
matchType: "3", // OnAverage
|
||||
target: 4.5,
|
||||
expectedAlertSample: v3.Point{Value: 6.0},
|
||||
expectedAlertSample: 6.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -561,7 +566,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "1", // Greater Than
|
||||
matchType: "3", // OnAverage
|
||||
target: 4.5,
|
||||
expectedAlertSample: v3.Point{Value: 6.0},
|
||||
expectedAlertSample: 6.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -577,7 +582,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "2", // Less Than
|
||||
matchType: "3", // OnAverage
|
||||
target: 12.0,
|
||||
expectedAlertSample: v3.Point{Value: 6.0},
|
||||
expectedAlertSample: 6.0,
|
||||
},
|
||||
// Test cases for InTotal
|
||||
{
|
||||
@@ -594,7 +599,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "3", // Equals
|
||||
matchType: "4", // InTotal
|
||||
target: 30.0,
|
||||
expectedAlertSample: v3.Point{Value: 30.0},
|
||||
expectedAlertSample: 30.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -621,7 +626,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "4", // Not Equals
|
||||
matchType: "4", // InTotal
|
||||
target: 9.0,
|
||||
expectedAlertSample: v3.Point{Value: 10.0},
|
||||
expectedAlertSample: 10.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -645,7 +650,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "1", // Greater Than
|
||||
matchType: "4", // InTotal
|
||||
target: 10.0,
|
||||
expectedAlertSample: v3.Point{Value: 20.0},
|
||||
expectedAlertSample: 20.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -670,7 +675,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
compareOp: "2", // Less Than
|
||||
matchType: "4", // InTotal
|
||||
target: 30.0,
|
||||
expectedAlertSample: v3.Point{Value: 20.0},
|
||||
expectedAlertSample: 20.0,
|
||||
},
|
||||
{
|
||||
values: pql.Series{
|
||||
@@ -708,7 +713,7 @@ func TestPromRuleEval(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
resultVectors, err := rule.Threshold.Eval(toCommonSeries(c.values), rule.Unit(), ruletypes.EvalData{})
|
||||
resultVectors, err := rule.Threshold.Eval(*promSeriesToTimeSeries(c.values), rule.Unit(), ruletypes.EvalData{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Compare full result vector with expected vector
|
||||
@@ -724,12 +729,12 @@ func TestPromRuleEval(t *testing.T) {
|
||||
if len(resultVectors) > 0 {
|
||||
found := false
|
||||
for _, sample := range resultVectors {
|
||||
if sample.V == c.expectedAlertSample.Value {
|
||||
if sample.V == c.expectedAlertSample {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Expected alert sample value %.2f not found in result vectors for case %d. Got values: %v", c.expectedAlertSample.Value, idx, actualValues)
|
||||
assert.True(t, found, "Expected alert sample value %.2f not found in result vectors for case %d. Got values: %v", c.expectedAlertSample, idx, actualValues)
|
||||
}
|
||||
} else {
|
||||
assert.Empty(t, resultVectors, "Expected no alert but got result vectors for case %d", idx)
|
||||
@@ -754,9 +759,13 @@ func TestPromRuleUnitCombinations(t *testing.T) {
|
||||
RuleCondition: &ruletypes.RuleCondition{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {
|
||||
Query: "test_metric",
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{
|
||||
Name: "A",
|
||||
Query: "test_metric",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1013,9 +1022,13 @@ func _Enable_this_after_9146_issue_fix_is_merged_TestPromRuleNoData(t *testing.T
|
||||
RuleCondition: &ruletypes.RuleCondition{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {
|
||||
Query: "test_metric",
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{
|
||||
Name: "A",
|
||||
Query: "test_metric",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1124,9 +1137,13 @@ func TestMultipleThresholdPromRule(t *testing.T) {
|
||||
RuleCondition: &ruletypes.RuleCondition{
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {
|
||||
Query: "test_metric",
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{
|
||||
Name: "A",
|
||||
Query: "test_metric",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1361,8 +1378,11 @@ func TestPromRule_NoData(t *testing.T) {
|
||||
MatchType: ruletypes.AtleastOnce,
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {Query: "test_metric"},
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{Name: "A", Query: "test_metric"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Thresholds: &ruletypes.RuleThresholdData{
|
||||
@@ -1486,8 +1506,11 @@ func TestPromRule_NoData_AbsentFor(t *testing.T) {
|
||||
Target: &target,
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {Query: "test_metric"},
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{Name: "A", Query: "test_metric"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Thresholds: &ruletypes.RuleThresholdData{
|
||||
@@ -1635,8 +1658,11 @@ func TestPromRuleEval_RequireMinPoints(t *testing.T) {
|
||||
MatchType: ruletypes.AtleastOnce,
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: v3.QueryTypePromQL,
|
||||
PromQueries: map[string]*v3.PromQuery{
|
||||
"A": {Query: "test_metric"},
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypePromQL,
|
||||
Spec: qbtypes.PromQuery{Name: "A", Query: "test_metric"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -1,38 +1,24 @@
|
||||
package rules
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/contextlinks"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/common"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
|
||||
"github.com/SigNoz/signoz/pkg/transition"
|
||||
"github.com/SigNoz/signoz/pkg/types/ruletypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/querier"
|
||||
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
|
||||
querytemplate "github.com/SigNoz/signoz/pkg/query-service/utils/queryTemplate"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/times"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
|
||||
|
||||
logsv3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
|
||||
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/formatter"
|
||||
|
||||
querierV5 "github.com/SigNoz/signoz/pkg/querier"
|
||||
@@ -42,23 +28,9 @@ import (
|
||||
|
||||
type ThresholdRule struct {
|
||||
*BaseRule
|
||||
// Ever since we introduced the new metrics query builder, the version is "v4"
|
||||
// for all the rules
|
||||
// if the version is "v3", then we use the old querier
|
||||
// if the version is "v4", then we use the new querierV2
|
||||
version string
|
||||
|
||||
// querier is used for alerts created before the introduction of new metrics query builder
|
||||
querier interfaces.Querier
|
||||
// querierV2 is used for alerts created after the introduction of new metrics query builder
|
||||
querierV2 interfaces.Querier
|
||||
|
||||
// querierV5 is used for alerts migrated after the introduction of new query builder
|
||||
// querierV5 is the query builder v5 querier used for all alert rule evaluation
|
||||
querierV5 querierV5.Querier
|
||||
|
||||
// used for attribute metadata enrichment for logs and traces
|
||||
logsKeys map[string]v3.AttributeKey
|
||||
spansKeys map[string]v3.AttributeKey
|
||||
}
|
||||
|
||||
var _ Rule = (*ThresholdRule)(nil)
|
||||
@@ -82,25 +54,10 @@ func NewThresholdRule(
|
||||
}
|
||||
|
||||
t := ThresholdRule{
|
||||
BaseRule: baseRule,
|
||||
version: p.Version,
|
||||
BaseRule: baseRule,
|
||||
querierV5: querierV5,
|
||||
}
|
||||
|
||||
querierOption := querier.QuerierOptions{
|
||||
Reader: reader,
|
||||
Cache: nil,
|
||||
KeyGenerator: queryBuilder.NewKeyGenerator(),
|
||||
}
|
||||
|
||||
querierOptsV2 := querierV2.QuerierOptions{
|
||||
Reader: reader,
|
||||
Cache: nil,
|
||||
KeyGenerator: queryBuilder.NewKeyGenerator(),
|
||||
}
|
||||
|
||||
t.querier = querier.NewQuerier(querierOption)
|
||||
t.querierV2 = querierV2.NewQuerier(querierOptsV2)
|
||||
t.querierV5 = querierV5
|
||||
t.reader = reader
|
||||
return &t, nil
|
||||
}
|
||||
@@ -120,169 +77,9 @@ func (r *ThresholdRule) Type() ruletypes.RuleType {
|
||||
return ruletypes.RuleTypeThreshold
|
||||
}
|
||||
|
||||
func (r *ThresholdRule) prepareQueryRange(ctx context.Context, ts time.Time) (*v3.QueryRangeParamsV3, error) {
|
||||
func (r *ThresholdRule) prepareQueryRange(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
|
||||
r.logger.InfoContext(
|
||||
ctx, "prepare query range request v4", "ts", ts.UnixMilli(), "eval_window", r.evalWindow.Milliseconds(), "eval_delay", r.evalDelay.Milliseconds(),
|
||||
)
|
||||
|
||||
startTs, endTs := r.Timestamps(ts)
|
||||
start, end := startTs.UnixMilli(), endTs.UnixMilli()
|
||||
|
||||
if r.ruleCondition.QueryType() == v3.QueryTypeClickHouseSQL {
|
||||
params := &v3.QueryRangeParamsV3{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
|
||||
CompositeQuery: &v3.CompositeQuery{
|
||||
QueryType: r.ruleCondition.CompositeQuery.QueryType,
|
||||
PanelType: r.ruleCondition.CompositeQuery.PanelType,
|
||||
BuilderQueries: make(map[string]*v3.BuilderQuery),
|
||||
ClickHouseQueries: make(map[string]*v3.ClickHouseQuery),
|
||||
PromQueries: make(map[string]*v3.PromQuery),
|
||||
Unit: r.ruleCondition.CompositeQuery.Unit,
|
||||
},
|
||||
Variables: make(map[string]interface{}),
|
||||
NoCache: true,
|
||||
}
|
||||
querytemplate.AssignReservedVarsV3(params)
|
||||
for name, chQuery := range r.ruleCondition.CompositeQuery.ClickHouseQueries {
|
||||
if chQuery.Disabled {
|
||||
continue
|
||||
}
|
||||
tmpl := template.New("clickhouse-query")
|
||||
tmpl, err := tmpl.Parse(chQuery.Query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var query bytes.Buffer
|
||||
err = tmpl.Execute(&query, params.Variables)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
params.CompositeQuery.ClickHouseQueries[name] = &v3.ClickHouseQuery{
|
||||
Query: query.String(),
|
||||
Disabled: chQuery.Disabled,
|
||||
Legend: chQuery.Legend,
|
||||
}
|
||||
}
|
||||
return params, nil
|
||||
}
|
||||
|
||||
if r.ruleCondition.CompositeQuery != nil && r.ruleCondition.CompositeQuery.BuilderQueries != nil {
|
||||
for _, q := range r.ruleCondition.CompositeQuery.BuilderQueries {
|
||||
// If the step interval is less than the minimum allowed step interval, set it to the minimum allowed step interval
|
||||
if minStep := common.MinAllowedStepInterval(start, end); q.StepInterval < minStep {
|
||||
q.StepInterval = minStep
|
||||
}
|
||||
|
||||
q.SetShiftByFromFunc()
|
||||
|
||||
if q.DataSource == v3.DataSourceMetrics {
|
||||
// if the time range is greater than 1 day, and less than 1 week set the step interval to be multiple of 5 minutes
|
||||
// if the time range is greater than 1 week, set the step interval to be multiple of 30 mins
|
||||
if end-start >= 24*time.Hour.Milliseconds() && end-start < 7*24*time.Hour.Milliseconds() {
|
||||
q.StepInterval = int64(math.Round(float64(q.StepInterval)/300)) * 300
|
||||
} else if end-start >= 7*24*time.Hour.Milliseconds() {
|
||||
q.StepInterval = int64(math.Round(float64(q.StepInterval)/1800)) * 1800
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r.ruleCondition.CompositeQuery.PanelType != v3.PanelTypeGraph {
|
||||
r.ruleCondition.CompositeQuery.PanelType = v3.PanelTypeGraph
|
||||
}
|
||||
|
||||
// default mode
|
||||
return &v3.QueryRangeParamsV3{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: int64(math.Max(float64(common.MinAllowedStepInterval(start, end)), 60)),
|
||||
CompositeQuery: r.ruleCondition.CompositeQuery,
|
||||
Variables: make(map[string]interface{}),
|
||||
NoCache: true,
|
||||
}, nil
|
||||
}
|
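To make the ClickHouse branch of prepareQueryRange concrete: each configured query is parsed with text/template and executed against the request's variable map (the reserved variables assigned by querytemplate.AssignReservedVarsV3), so placeholders are resolved before the alert query reaches the database. A self-contained sketch of that rendering step; the table, column, and variable names below are placeholders, not the real reserved names.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Hypothetical alert query with template placeholders; the real reserved
	// variable names are set by querytemplate.AssignReservedVarsV3.
	raw := "SELECT count() FROM signoz_logs.logs WHERE timestamp >= {{.start_timestamp_ms}} AND timestamp <= {{.end_timestamp_ms}}"

	vars := map[string]interface{}{
		"start_timestamp_ms": int64(1700000000000),
		"end_timestamp_ms":   int64(1700000300000),
	}

	tmpl, err := template.New("clickhouse-query").Parse(raw)
	if err != nil {
		panic(err)
	}

	var query bytes.Buffer
	if err := tmpl.Execute(&query, vars); err != nil {
		panic(err)
	}

	// Prints the fully substituted query that would be handed to the querier.
	fmt.Println(query.String())
}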
||||
|
||||
func (r *ThresholdRule) prepareLinksToLogs(ctx context.Context, ts time.Time, lbls labels.Labels) string {
|
||||
if r.version == "v5" {
|
||||
return r.prepareLinksToLogsV5(ctx, ts, lbls)
|
||||
}
|
||||
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
|
||||
qr, err := r.prepareQueryRange(ctx, ts)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
start := time.UnixMilli(qr.Start)
|
||||
end := time.UnixMilli(qr.End)
|
||||
|
||||
// TODO(srikanthccv): handle formula queries
|
||||
if selectedQuery < "A" || selectedQuery > "Z" {
|
||||
return ""
|
||||
}
|
||||
|
||||
q := r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery]
|
||||
if q == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if q.DataSource != v3.DataSourceLogs {
|
||||
return ""
|
||||
}
|
||||
|
||||
queryFilter := []v3.FilterItem{}
|
||||
if q.Filters != nil {
|
||||
queryFilter = q.Filters.Items
|
||||
}
|
||||
|
||||
filterItems := contextlinks.PrepareFilters(lbls.Map(), queryFilter, q.GroupBy, r.logsKeys)
|
||||
|
||||
return contextlinks.PrepareLinksToLogs(start, end, filterItems)
|
||||
}
|
||||
|
||||
func (r *ThresholdRule) prepareLinksToTraces(ctx context.Context, ts time.Time, lbls labels.Labels) string {
|
||||
if r.version == "v5" {
|
||||
return r.prepareLinksToTracesV5(ctx, ts, lbls)
|
||||
}
|
||||
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
|
||||
qr, err := r.prepareQueryRange(ctx, ts)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
start := time.UnixMilli(qr.Start)
|
||||
end := time.UnixMilli(qr.End)
|
||||
|
||||
// TODO(srikanthccv): handle formula queries
|
||||
if selectedQuery < "A" || selectedQuery > "Z" {
|
||||
return ""
|
||||
}
|
||||
|
||||
q := r.ruleCondition.CompositeQuery.BuilderQueries[selectedQuery]
|
||||
if q == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if q.DataSource != v3.DataSourceTraces {
|
||||
return ""
|
||||
}
|
||||
|
||||
queryFilter := []v3.FilterItem{}
|
||||
if q.Filters != nil {
|
||||
queryFilter = q.Filters.Items
|
||||
}
|
||||
|
||||
filterItems := contextlinks.PrepareFilters(lbls.Map(), queryFilter, q.GroupBy, r.spansKeys)
|
||||
|
||||
return contextlinks.PrepareLinksToTraces(start, end, filterItems)
|
||||
}
|
||||
|
||||
func (r *ThresholdRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (*qbtypes.QueryRangeRequest, error) {
|
||||
r.logger.InfoContext(
|
||||
ctx, "prepare query range request v5", "ts", ts.UnixMilli(), "eval_window", r.evalWindow.Milliseconds(), "eval_delay", r.evalDelay.Milliseconds(),
|
||||
ctx, "prepare query range request", "ts", ts.UnixMilli(), "eval_window", r.evalWindow.Milliseconds(), "eval_delay", r.evalDelay.Milliseconds(),
|
||||
)
|
||||
|
||||
startTs, endTs := r.Timestamps(ts)
|
||||
@@ -302,10 +99,10 @@ func (r *ThresholdRule) prepareQueryRangeV5(ctx context.Context, ts time.Time) (
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (r *ThresholdRule) prepareLinksToLogsV5(ctx context.Context, ts time.Time, lbls labels.Labels) string {
|
||||
func (r *ThresholdRule) prepareLinksToLogs(ctx context.Context, ts time.Time, lbls labels.Labels) string {
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
|
||||
qr, err := r.prepareQueryRangeV5(ctx, ts)
|
||||
qr, err := r.prepareQueryRange(ctx, ts)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
@@ -342,10 +139,10 @@ func (r *ThresholdRule) prepareLinksToLogsV5(ctx context.Context, ts time.Time,
|
||||
return contextlinks.PrepareLinksToLogsV5(start, end, whereClause)
|
||||
}
|
||||
|
||||
func (r *ThresholdRule) prepareLinksToTracesV5(ctx context.Context, ts time.Time, lbls labels.Labels) string {
|
||||
func (r *ThresholdRule) prepareLinksToTraces(ctx context.Context, ts time.Time, lbls labels.Labels) string {
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
|
||||
qr, err := r.prepareQueryRangeV5(ctx, ts)
|
||||
qr, err := r.prepareQueryRange(ctx, ts)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
@@ -391,115 +188,6 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = r.PopulateTemporality(ctx, orgID, params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("internal error while setting temporality")
|
||||
}
|
||||
|
||||
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
|
||||
hasLogsQuery := false
|
||||
hasTracesQuery := false
|
||||
for _, query := range params.CompositeQuery.BuilderQueries {
|
||||
if query.DataSource == v3.DataSourceLogs {
|
||||
hasLogsQuery = true
|
||||
}
|
||||
if query.DataSource == v3.DataSourceTraces {
|
||||
hasTracesQuery = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasLogsQuery {
|
||||
// check if any enrichment is required for logs if yes then enrich them
|
||||
if logsv3.EnrichmentRequired(params) {
|
||||
logsFields, apiErr := r.reader.GetLogFieldsFromNames(ctx, logsv3.GetFieldNames(params.CompositeQuery))
|
||||
if apiErr != nil {
|
||||
return nil, apiErr.ToError()
|
||||
}
|
||||
logsKeys := model.GetLogFieldsV3(ctx, params, logsFields)
|
||||
r.logsKeys = logsKeys
|
||||
logsv3.Enrich(params, logsKeys)
|
||||
}
|
||||
}
|
||||
|
||||
if hasTracesQuery {
|
||||
spanKeys, err := r.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(params.CompositeQuery))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.spansKeys = spanKeys
|
||||
tracesV4.Enrich(params, spanKeys)
|
||||
}
|
||||
}
|
||||
|
||||
var results []*v3.Result
|
||||
var queryErrors map[string]error
|
||||
|
||||
if r.version == "v4" {
|
||||
results, queryErrors, err = r.querierV2.QueryRange(ctx, orgID, params)
|
||||
} else {
|
||||
results, queryErrors, err = r.querier.QueryRange(ctx, orgID, params)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
r.logger.ErrorContext(ctx, "failed to get alert query range result", "rule_name", r.Name(), "error", err, "query_errors", queryErrors)
|
||||
return nil, fmt.Errorf("internal error while querying")
|
||||
}
|
||||
|
||||
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
|
||||
results, err = postprocess.PostProcessResult(results, params)
|
||||
if err != nil {
|
||||
r.logger.ErrorContext(ctx, "failed to post process result", "rule_name", r.Name(), "error", err)
|
||||
return nil, fmt.Errorf("internal error while post processing")
|
||||
}
|
||||
}
|
||||
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
|
||||
var queryResult *v3.Result
|
||||
for _, res := range results {
|
||||
if res.QueryName == selectedQuery {
|
||||
queryResult = res
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
hasData := queryResult != nil && len(queryResult.Series) > 0
|
||||
if missingDataAlert := r.HandleMissingDataAlert(ctx, ts, hasData); missingDataAlert != nil {
|
||||
return ruletypes.Vector{*missingDataAlert}, nil
|
||||
}
|
||||
|
||||
var resultVector ruletypes.Vector
|
||||
|
||||
if queryResult == nil {
|
||||
r.logger.WarnContext(ctx, "query result is nil", "rule_name", r.Name(), "query_name", selectedQuery)
|
||||
return resultVector, nil
|
||||
}
|
||||
|
||||
for _, series := range queryResult.Series {
|
||||
if !r.Condition().ShouldEval(series) {
|
||||
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||
continue
|
||||
}
|
||||
resultSeries, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
|
||||
ActiveAlerts: r.ActiveAlertsLabelFP(),
|
||||
SendUnmatched: r.ShouldSendUnmatched(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resultVector = append(resultVector, resultSeries...)
|
||||
}
|
||||
|
||||
return resultVector, nil
|
||||
}
|
||||
|
||||
func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID, ts time.Time) (ruletypes.Vector, error) {
|
||||
params, err := r.prepareQueryRangeV5(ctx, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var results []*v3.Result
|
||||
|
||||
v5Result, err := r.querierV5.QueryRange(ctx, orgID, params)
|
||||
if err != nil {
|
||||
@@ -507,26 +195,24 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
|
||||
return nil, fmt.Errorf("internal error while querying")
|
||||
}
|
||||
|
||||
for _, item := range v5Result.Data.Results {
|
||||
if tsData, ok := item.(*qbtypes.TimeSeriesData); ok {
|
||||
results = append(results, transition.ConvertV5TimeSeriesDataToV4Result(tsData))
|
||||
} else {
|
||||
// NOTE: should not happen but just to ensure we don't miss it if it happens for some reason
|
||||
r.logger.WarnContext(ctx, "expected qbtypes.TimeSeriesData but got", "item_type", reflect.TypeOf(item))
|
||||
}
|
||||
}
|
||||
|
||||
selectedQuery := r.GetSelectedQuery()
|
||||
|
||||
var queryResult *v3.Result
|
||||
for _, res := range results {
|
||||
if res.QueryName == selectedQuery {
|
||||
queryResult = res
|
||||
var queryResult *qbtypes.TimeSeriesData
|
||||
for _, item := range v5Result.Data.Results {
|
||||
if tsData, ok := item.(*qbtypes.TimeSeriesData); ok && tsData.QueryName == selectedQuery {
|
||||
queryResult = tsData
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
hasData := queryResult != nil && len(queryResult.Series) > 0
|
||||
var allSeries []*qbtypes.TimeSeries
|
||||
if queryResult != nil {
|
||||
for _, bucket := range queryResult.Aggregations {
|
||||
allSeries = append(allSeries, bucket.Series...)
|
||||
}
|
||||
}
|
||||
|
||||
hasData := len(allSeries) > 0
|
||||
if missingDataAlert := r.HandleMissingDataAlert(ctx, ts, hasData); missingDataAlert != nil {
|
||||
return ruletypes.Vector{*missingDataAlert}, nil
|
||||
}
|
||||
@@ -539,7 +225,7 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
|
||||
}
|
||||
|
||||
// Filter out new series if newGroupEvalDelay is configured
|
||||
seriesToProcess := queryResult.Series
|
||||
seriesToProcess := allSeries
|
||||
if r.ShouldSkipNewGroups() {
|
||||
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
|
||||
// In case of error we log the error and continue with the original series
|
||||
@@ -552,7 +238,7 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
|
||||
|
||||
for _, series := range seriesToProcess {
|
||||
if !r.Condition().ShouldEval(series) {
|
||||
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||
r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Values), "requiredPoints", r.Condition().RequiredNumPoints)
|
||||
continue
|
||||
}
|
||||
resultSeries, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
|
||||
@@ -573,16 +259,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (int, error) {
|
||||
|
||||
valueFormatter := formatter.FromUnit(r.Unit())
|
||||
|
||||
var res ruletypes.Vector
|
||||
var err error
|
||||
|
||||
if r.version == "v5" {
|
||||
r.logger.InfoContext(ctx, "running v5 query")
|
||||
res, err = r.buildAndRunQueryV5(ctx, r.orgID, ts)
|
||||
} else {
|
||||
r.logger.InfoContext(ctx, "running v4 query")
|
||||
res, err = r.buildAndRunQuery(ctx, r.orgID, ts)
|
||||
}
|
||||
res, err := r.buildAndRunQuery(ctx, r.orgID, ts)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -483,22 +483,6 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
|
||||
value1 := v.Visit(values[0])
|
||||
value2 := v.Visit(values[1])
|
||||
|
||||
switch value1.(type) {
|
||||
case float64:
|
||||
if _, ok := value2.(float64); !ok {
|
||||
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected number for both operands", keys[0].Name))
|
||||
return ""
|
||||
}
|
||||
case string:
|
||||
if _, ok := value2.(string); !ok {
|
||||
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected string for both operands", keys[0].Name))
|
||||
return ""
|
||||
}
|
||||
default:
|
||||
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: operands must be number or string", keys[0].Name))
|
||||
return ""
|
||||
}
|
||||
|
||||
var conds []string
|
||||
for _, key := range keys {
|
||||
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, []any{value1, value2}, v.builder, v.startNs, v.endNs)
|
||||
@@ -871,7 +855,7 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
|
||||
// 1. either user meant key ( this is already handled above in fieldKeysForName )
|
||||
// 2. or user meant `attribute.key` we look up in the map for all possible field keys with name 'attribute.key'
|
||||
|
||||
// Note:
|
||||
// Note:
|
||||
// If user only wants to search `attribute.key`, then they have to use `attribute.attribute.key`
|
||||
// If user only wants to search `key`, then they have to use `key`
|
||||
// If user wants to search both, they can use `attribute.key` and we will resolve the ambiguity
|
||||
|
||||
@@ -375,6 +375,13 @@ func mergeAndEnsureBackwardCompatibility(ctx context.Context, logger *slog.Logge
|
||||
config.Flagger.Config.Boolean[flagger.FeatureKafkaSpanEval.String()] = os.Getenv("KAFKA_SPAN_EVAL") == "true"
|
||||
}
|
||||
|
||||
if os.Getenv("INTERPOLATION_ENABLED") != "" {
|
||||
logger.WarnContext(ctx, "[Deprecated] env INTERPOLATION_ENABLED is deprecated and scheduled for removal. Please use SIGNOZ_FLAGGER_CONFIG_BOOLEAN_INTERPOLATION__ENABLED instead.")
|
||||
if config.Flagger.Config.Boolean == nil {
|
||||
config.Flagger.Config.Boolean = make(map[string]bool)
|
||||
}
|
||||
config.Flagger.Config.Boolean[flagger.FeatureInterpolationEnabled.String()] = os.Getenv("INTERPOLATION_ENABLED") == "true"
|
||||
}
|
||||
}
|
||||
|
||||
func (config Config) Collect(_ context.Context, _ valuer.UUID) (map[string]any, error) {
|
||||
|
||||
@@ -167,6 +167,7 @@ func NewSQLMigrationProviderFactories(
|
||||
sqlmigration.NewMigrateRbacToAuthzFactory(sqlstore),
|
||||
sqlmigration.NewMigratePublicDashboardsFactory(sqlstore),
|
||||
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
|
||||
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
194 pkg/sqlmigration/064_migrate_rules_v4_to_v5.go Normal file
@@ -0,0 +1,194 @@
|
||||
package sqlmigration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"log/slog"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/transition"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/migrate"
|
||||
)
|
||||
|
||||
type migrateRulesV4ToV5 struct {
|
||||
store sqlstore.SQLStore
|
||||
telemetryStore telemetrystore.TelemetryStore
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
||||
func NewMigrateRulesV4ToV5Factory(
|
||||
store sqlstore.SQLStore,
|
||||
telemetryStore telemetrystore.TelemetryStore,
|
||||
) factory.ProviderFactory[SQLMigration, Config] {
|
||||
return factory.NewProviderFactory(
|
||||
factory.MustNewName("migrate_rules_v4_to_v5"),
|
||||
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
|
||||
return &migrateRulesV4ToV5{
|
||||
store: store,
|
||||
telemetryStore: telemetryStore,
|
||||
logger: ps.Logger,
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
|
||||
if err := migrations.Register(migration.Up, migration.Down); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
|
||||
query := `
|
||||
SELECT name
|
||||
FROM (
|
||||
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
|
||||
INTERSECT
|
||||
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
|
||||
)
|
||||
ORDER BY name
|
||||
`
|
||||
|
||||
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
|
||||
if err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
|
||||
return nil, nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var keys []string
|
||||
for rows.Next() {
|
||||
var key string
|
||||
if err := rows.Scan(&key); err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
|
||||
continue
|
||||
}
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
|
||||
query := `
|
||||
SELECT tagKey
|
||||
FROM signoz_traces.distributed_span_attributes_keys
|
||||
WHERE tagType IN ('tag', 'resource')
|
||||
GROUP BY tagKey
|
||||
HAVING COUNT(DISTINCT tagType) > 1
|
||||
ORDER BY tagKey
|
||||
`
|
||||
|
||||
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
|
||||
if err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
|
||||
return nil, nil
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var keys []string
|
||||
for rows.Next() {
|
||||
var key string
|
||||
if err := rows.Scan(&key); err != nil {
|
||||
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
|
||||
continue
|
||||
}
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
|
||||
logsKeys, err := migration.getLogDuplicateKeys(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tx, err := db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
var rules []struct {
|
||||
ID string `bun:"id"`
|
||||
Data map[string]any `bun:"data"`
|
||||
}
|
||||
|
||||
err = tx.NewSelect().
|
||||
Table("rule").
|
||||
Column("id", "data").
|
||||
Scan(ctx, &rules)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
|
||||
|
||||
for _, rule := range rules {
|
||||
version, _ := rule.Data["version"].(string)
|
||||
if version == "v5" {
|
||||
continue
|
||||
}
|
||||
|
||||
migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)
|
||||
|
||||
// Check if the queries envelope already exists and is non-empty
|
||||
hasQueriesEnvelope := false
|
||||
if condition, ok := rule.Data["condition"].(map[string]any); ok {
|
||||
if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
|
||||
if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
|
||||
hasQueriesEnvelope = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if hasQueriesEnvelope {
|
||||
// Case 2: Already has queries envelope, just bump version
|
||||
migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
|
||||
rule.Data["version"] = "v5"
|
||||
} else {
|
||||
// Case 1: Old format, run full migration
|
||||
migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
|
||||
alertsMigrator.Migrate(ctx, rule.Data)
|
||||
// Force version to v5 regardless of Migrate return value
|
||||
rule.Data["version"] = "v5"
|
||||
}
|
||||
|
||||
dataJSON, err := json.Marshal(rule.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tx.NewUpdate().
|
||||
Table("rule").
|
||||
Set("data = ?", string(dataJSON)).
|
||||
Where("id = ?", rule.ID).
|
||||
Exec(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
|
||||
return nil
|
||||
}
|
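The Up migration above makes a three-way decision per stored rule: skip anything already at version v5, bump only the version when a non-empty queries envelope is already present, and otherwise run the full v4-to-v5 conversion via transition.NewAlertMigrateV5. A compact, self-contained sketch of just that decision over the same map[string]any payload; the helper name is invented for illustration.

package main

import "fmt"

// needsFullMigration reports whether a stored rule payload still uses the
// pre-v5 shape, i.e. its condition.compositeQuery has no non-empty
// "queries" envelope. Rules already at version "v5" are skipped entirely.
func needsFullMigration(data map[string]any) (skip bool, full bool) {
	if v, _ := data["version"].(string); v == "v5" {
		return true, false
	}
	if condition, ok := data["condition"].(map[string]any); ok {
		if cq, ok := condition["compositeQuery"].(map[string]any); ok {
			if queries, ok := cq["queries"].([]any); ok && len(queries) > 0 {
				return false, false // envelope present: only bump the version
			}
		}
	}
	return false, true // old format: run the full migration
}

func main() {
	oldRule := map[string]any{
		"version": "v4",
		"condition": map[string]any{
			"compositeQuery": map[string]any{},
		},
	}
	skip, full := needsFullMigration(oldRule)
	fmt.Println(skip, full) // false true
}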
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
|
||||
@@ -74,7 +73,7 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
|
||||
cteArgs [][]any
|
||||
)
|
||||
|
||||
if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
|
||||
if b.metricsStatementBuilder.CanShortCircuitDelta(query) {
|
||||
// spatial_aggregation_cte directly for certain delta queries
|
||||
if frag, args, err := b.buildTemporalAggDeltaFastPath(ctx, start, end, query, keys, variables); err != nil {
|
||||
return nil, err
|
||||
@@ -92,9 +91,8 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
|
||||
}
|
||||
|
||||
// spatial_aggregation_cte
|
||||
if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
|
||||
return nil, err
|
||||
} else if frag != "" {
|
||||
frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
|
||||
if frag != "" {
|
||||
cteFragments = append(cteFragments, frag)
|
||||
cteArgs = append(cteArgs, args)
|
||||
}
|
||||
@@ -124,16 +122,13 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
|
||||
for _, g := range query.GroupBy {
|
||||
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", []any{}, err
|
||||
}
|
||||
sb.SelectMore(col)
|
||||
}
|
||||
|
||||
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
|
||||
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
|
||||
}
|
||||
@@ -155,7 +150,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
|
||||
Variables: variables,
|
||||
}, start, end)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", []any{}, err
|
||||
}
|
||||
}
|
||||
if filterWhere != nil {
|
||||
@@ -213,11 +208,8 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
|
||||
}
|
||||
|
||||
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
|
||||
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
|
||||
query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
|
||||
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
|
||||
}
|
||||
@@ -286,10 +278,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
|
||||
}
|
||||
|
||||
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))
|
||||
|
||||
baseSb.From(fmt.Sprintf("%s.%s AS points", DBName, tbl))
|
||||
@@ -326,23 +315,25 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
|
||||
|
||||
switch query.Aggregations[0].TimeAggregation {
|
||||
case metrictypes.TimeAggregationRate:
|
||||
rateExpr := fmt.Sprintf(telemetrymetrics.RateWithoutNegative, start, start)
|
||||
wrapped := sqlbuilder.NewSelectBuilder()
|
||||
wrapped.Select("ts")
|
||||
for _, g := range query.GroupBy {
|
||||
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.RateTmpl))
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
|
||||
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
|
||||
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
|
||||
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
|
||||
|
||||
case metrictypes.TimeAggregationIncrease:
|
||||
incExpr := fmt.Sprintf(telemetrymetrics.IncreaseWithoutNegative, start, start)
|
||||
wrapped := sqlbuilder.NewSelectBuilder()
|
||||
wrapped.Select("ts")
|
||||
for _, g := range query.GroupBy {
|
||||
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.IncreaseTmpl))
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
|
||||
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
|
||||
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
|
||||
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
|
||||
@@ -357,15 +348,7 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
|
||||
_ uint64,
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
|
||||
_ map[string][]*telemetrytypes.TelemetryFieldKey,
|
||||
) (string, []any, error) {
|
||||
|
||||
if query.Aggregations[0].SpaceAggregation.IsZero() {
|
||||
return "", nil, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`]",
|
||||
)
|
||||
}
|
||||
) (string, []any) {
|
||||
sb := sqlbuilder.NewSelectBuilder()
|
||||
|
||||
sb.Select("ts")
|
||||
@@ -382,5 +365,5 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
|
||||
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
|
||||
|
||||
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
|
||||
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package telemetrymeter
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
)
|
||||
|
||||
@@ -64,7 +63,7 @@ func AggregationColumnForSamplesTable(
|
||||
temporality metrictypes.Temporality,
|
||||
timeAggregation metrictypes.TimeAggregation,
|
||||
tableHints *metrictypes.MetricTableHints,
|
||||
) (string, error) {
|
||||
) string {
|
||||
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
|
||||
var aggregationColumn string
|
||||
switch temporality {
|
||||
@@ -191,13 +190,5 @@ func AggregationColumnForSamplesTable(
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if aggregationColumn == "" {
|
||||
return "", errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
|
||||
)
|
||||
}
|
||||
return aggregationColumn, nil
|
||||
return aggregationColumn
|
||||
}
|
||||
|
||||
@@ -29,7 +29,13 @@ func (c *conditionBuilder) conditionFor(
|
||||
sb *sqlbuilder.SelectBuilder,
|
||||
) (string, error) {
|
||||
|
||||
if operator.IsStringSearchOperator() {
|
||||
switch operator {
|
||||
case qbtypes.FilterOperatorContains,
|
||||
qbtypes.FilterOperatorNotContains,
|
||||
qbtypes.FilterOperatorILike,
|
||||
qbtypes.FilterOperatorNotILike,
|
||||
qbtypes.FilterOperatorLike,
|
||||
qbtypes.FilterOperatorNotLike:
|
||||
value = querybuilder.FormatValueForContains(value)
|
||||
}
|
||||
|
||||
@@ -38,18 +44,6 @@ func (c *conditionBuilder) conditionFor(
|
||||
return "", err
|
||||
}
|
||||
|
||||
// TODO(srikanthccv): use the same data type collision handling when metrics schemas are updated
|
||||
switch v := value.(type) {
|
||||
case float64:
|
||||
tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
|
||||
case []any:
|
||||
if len(v) > 0 && (operator == qbtypes.FilterOperatorBetween || operator == qbtypes.FilterOperatorNotBetween) {
|
||||
if _, ok := v[0].(float64); ok {
|
||||
tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch operator {
|
||||
case qbtypes.FilterOperatorEqual:
|
||||
return sb.E(tblFieldName, value), nil
|
||||
|
||||
@@ -5,27 +5,67 @@ import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/flagger"
|
||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||
"github.com/SigNoz/signoz/pkg/types/featuretypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
RateTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))`
|
||||
RateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
|
||||
IncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, ((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
|
||||
|
||||
IncreaseTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value, per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)`
|
||||
RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
|
||||
IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s, ((%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
|
||||
OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
|
||||
|
||||
RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1) OVER rate_window), (%s - lagInFrame(%s, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))) AS per_series_value`
|
||||
RateWithInterpolation = `
|
||||
CASE
|
||||
WHEN row_number() OVER rate_window = 1 THEN
|
||||
-- First row: try to interpolate using next value
|
||||
CASE
|
||||
WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
|
||||
-- Assume linear growth to next point
|
||||
(leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
|
||||
(leadInFrame(ts, 1) OVER rate_window - ts)
|
||||
ELSE
|
||||
0 -- No next value either, can't interpolate
|
||||
END
|
||||
WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
|
||||
-- Counter reset detected
|
||||
per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window)
|
||||
ELSE
|
||||
-- Normal case: calculate rate
|
||||
(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) /
|
||||
(ts - lagInFrame(ts, 1) OVER rate_window)
|
||||
END`
|
||||
|
||||
IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s, (%s - lagInFrame(%s, 1) OVER rate_window))) AS per_series_value`
|
||||
|
||||
OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
|
||||
IncreaseWithInterpolation = `
|
||||
CASE
|
||||
WHEN row_number() OVER rate_window = 1 THEN
|
||||
-- First row: try to interpolate using next value
|
||||
CASE
|
||||
WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
|
||||
-- Calculate the interpolated increase for this interval
|
||||
((leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
|
||||
(leadInFrame(ts, 1) OVER rate_window - ts)) *
|
||||
(leadInFrame(ts, 1) OVER rate_window - ts)
|
||||
ELSE
|
||||
0 -- No next value either, can't interpolate
|
||||
END
|
||||
WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
|
||||
-- Counter reset detected: the increase is the current value
|
||||
per_series_value
|
||||
ELSE
|
||||
-- Normal case: calculate increase
|
||||
(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)
|
||||
END`
|
||||
)
|
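All of the rate and increase templates above encode the same Prometheus-style rule: take the difference to the previous point inside a per-fingerprint window, and if the counter went down, treat it as a reset and fall back to the current value (divided by the elapsed time for rates). A tiny Go model of that arithmetic, purely illustrative, may help when reading the SQL.

package main

import "fmt"

type sample struct {
	ts    int64   // unix seconds
	value float64 // cumulative counter value
}

// ratePerSecond mirrors the counter-reset handling in RateWithoutNegative:
// when the value drops, assume a reset and divide the current value by the
// elapsed time; otherwise use the plain difference quotient.
func ratePerSecond(prev, cur sample) float64 {
	dt := float64(cur.ts - prev.ts)
	if dt <= 0 {
		return 0
	}
	delta := cur.value - prev.value
	if delta < 0 { // counter reset
		return cur.value / dt
	}
	return delta / dt
}

func main() {
	series := []sample{{ts: 0, value: 0}, {ts: 60, value: 120}, {ts: 120, value: 30}}
	for i := 1; i < len(series); i++ {
		fmt.Printf("t=%d rate=%.2f\n", series[i].ts, ratePerSecond(series[i-1], series[i]))
	}
	// t=60 rate=2.00 (normal), t=120 rate=0.50 (reset: 30/60)
}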
||||
|
||||
type MetricQueryStatementBuilder struct {
|
||||
@@ -107,6 +147,54 @@ func (b *MetricQueryStatementBuilder) Build(
|
||||
return b.buildPipelineStatement(ctx, start, end, query, keys, variables)
|
||||
}
|
||||
|
||||
// Fast‑path (no fingerprint grouping)
|
||||
// CanShortCircuitDelta returns true if we can use the optimized query
|
||||
// for the given query
|
||||
// This is used to avoid the group by fingerprint thus improving the performance
|
||||
// for certain queries
|
||||
// cases where we can short circuit:
|
||||
// 1. time aggregation = (rate|increase) and space aggregation = sum
|
||||
// - rate = sum(value)/step, increase = sum(value) - sum of sums is same as sum of all values
|
||||
//
|
||||
// 2. time aggregation = sum and space aggregation = sum
|
||||
// - sum of sums is same as sum of all values
|
||||
//
|
||||
// 3. time aggregation = min and space aggregation = min
|
||||
// - min of mins is same as min of all values
|
||||
//
|
||||
// 4. time aggregation = max and space aggregation = max
|
||||
// - max of maxes is same as max of all values
|
||||
//
|
||||
// 5. special case exphist, there is no need for per series/fingerprint aggregation
|
||||
// we can directly use the quantilesDDMerge function
|
||||
//
|
||||
// all of this is true only for delta metrics
|
||||
func (b *MetricQueryStatementBuilder) CanShortCircuitDelta(q qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) bool {
|
||||
if q.Aggregations[0].Temporality != metrictypes.Delta {
|
||||
return false
|
||||
}
|
||||
|
||||
ta := q.Aggregations[0].TimeAggregation
|
||||
sa := q.Aggregations[0].SpaceAggregation
|
||||
|
||||
if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) && sa == metrictypes.SpaceAggregationSum {
|
||||
return true
|
||||
}
|
||||
if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
|
||||
return true
|
||||
}
|
||||
if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
|
||||
return true
|
||||
}
|
||||
if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
|
||||
return true
|
||||
}
|
||||
if q.Aggregations[0].Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
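The short-circuit cases listed above all lean on associativity: for delta samples, the sum of per-series sums equals a single sum over every point (and analogously for min and max), so the per-fingerprint temporal CTE adds nothing and can be skipped. A small numeric check of that claim, illustrative only.

package main

import "fmt"

func main() {
	// Two delta series (per-fingerprint values within one step interval).
	seriesA := []float64{1, 2, 3}
	seriesB := []float64{10, 20}

	sum := func(xs []float64) float64 {
		total := 0.0
		for _, x := range xs {
			total += x
		}
		return total
	}

	// Space-aggregate(sum) applied to the per-series time-aggregate(sum) ...
	perSeriesThenSpace := sum([]float64{sum(seriesA), sum(seriesB)})
	// ... equals one sum over all points, which is what the fast path computes.
	allPoints := sum(append(append([]float64{}, seriesA...), seriesB...))

	fmt.Println(perSeriesThenSpace == allPoints, perSeriesThenSpace) // true 36
}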
||||
|
||||
func (b *MetricQueryStatementBuilder) buildPipelineStatement(
|
||||
ctx context.Context,
|
||||
start, end uint64,
|
||||
@@ -168,11 +256,10 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
|
||||
if b.CanShortCircuitDelta(query) {
|
||||
// spatial_aggregation_cte directly for certain delta queries
|
||||
if frag, args, err := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs); err != nil {
|
||||
return nil, err
|
||||
} else if frag != "" {
|
||||
frag, args := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs)
|
||||
if frag != "" {
|
||||
cteFragments = append(cteFragments, frag)
|
||||
cteArgs = append(cteArgs, args)
|
||||
}
|
||||
@@ -186,9 +273,8 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
|
||||
}
|
||||
|
||||
// spatial_aggregation_cte
|
||||
if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
|
||||
return nil, err
|
||||
} else if frag != "" {
|
||||
frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
|
||||
if frag != "" {
|
||||
cteFragments = append(cteFragments, frag)
|
||||
cteArgs = append(cteArgs, args)
|
||||
}
|
||||
@@ -208,7 +294,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
|
||||
timeSeriesCTE string,
|
||||
timeSeriesCTEArgs []any,
|
||||
) (string, []any, error) {
|
||||
) (string, []any) {
|
||||
stepSec := int64(query.StepInterval.Seconds())
|
||||
|
||||
sb := sqlbuilder.NewSelectBuilder()
|
||||
@@ -221,15 +307,11 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
|
||||
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
|
||||
aggCol, err := AggregationColumnForSamplesTable(
|
||||
aggCol := AggregationColumnForSamplesTable(
|
||||
start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
|
||||
query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints,
|
||||
)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
|
||||
// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
|
||||
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
|
||||
}
|
||||
|
||||
@@ -252,7 +334,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
|
||||
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
|
||||
|
||||
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
|
||||
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
|
||||
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
|
||||
}
|
||||
|
||||
func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
|
||||
@@ -355,12 +437,8 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
|
||||
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
|
||||
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
|
||||
// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
|
||||
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
|
||||
}
|
||||
|
||||
@@ -383,7 +461,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
|
||||
}
|
||||
|
||||
func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
|
||||
_ context.Context,
|
||||
ctx context.Context,
|
||||
start, end uint64,
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
|
||||
timeSeriesCTE string,
|
||||
@@ -401,10 +479,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
|
||||
baseSb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
|
||||
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))
|
||||
|
||||
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
@@ -421,25 +496,36 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
|
||||
|
||||
innerQuery, innerArgs := baseSb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
|
||||
|
||||
// ! TODO (balanikaran) Get OrgID via function parameter instead of valuer.GenerateUUID()
|
||||
interpolationEnabled := b.flagger.BooleanOrEmpty(ctx, flagger.FeatureInterpolationEnabled, featuretypes.NewFlaggerEvaluationContext(valuer.GenerateUUID()))
|
||||
|
||||
switch query.Aggregations[0].TimeAggregation {
|
||||
case metrictypes.TimeAggregationRate:
|
||||
rateExpr := fmt.Sprintf(RateWithoutNegative, start, start)
|
||||
if interpolationEnabled {
|
||||
rateExpr = RateWithInterpolation
|
||||
}
|
||||
wrapped := sqlbuilder.NewSelectBuilder()
|
||||
wrapped.Select("ts")
|
||||
for _, g := range query.GroupBy {
|
||||
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", RateTmpl))
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
|
||||
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
|
||||
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
|
||||
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
|
||||
|
||||
case metrictypes.TimeAggregationIncrease:
|
||||
incExpr := fmt.Sprintf(IncreaseWithoutNegative, start, start)
|
||||
if interpolationEnabled {
|
||||
incExpr = IncreaseWithInterpolation
|
||||
}
|
||||
wrapped := sqlbuilder.NewSelectBuilder()
|
||||
wrapped.Select("ts")
|
||||
for _, g := range query.GroupBy {
|
||||
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", IncreaseTmpl))
|
||||
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
|
||||
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
|
||||
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
|
||||
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
|
||||
@@ -448,6 +534,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
|
||||
}
|
||||
}
|
||||
|
||||
// RateWithInterpolation is not enabled anywhere yet because of gaps in the cache-handling logic, so it has not been considered for the multi-temporality path
|
||||
func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
|
||||
_ context.Context,
|
||||
start, end uint64,
|
||||
@@ -466,32 +553,18 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
|
||||
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
|
||||
aggForDeltaTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
aggForCumulativeTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
aggForDeltaTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
aggForCumulativeTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
|
||||
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
|
||||
aggForDeltaTemporality = fmt.Sprintf("%s/%d", aggForDeltaTemporality, stepSec)
|
||||
}
|
||||
|
||||
switch query.Aggregations[0].TimeAggregation {
|
||||
case metrictypes.TimeAggregationRate:
|
||||
rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality,
|
||||
aggForDeltaTemporality,
|
||||
aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
|
||||
aggForCumulativeTemporality, aggForCumulativeTemporality,
|
||||
)
|
||||
rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, aggForCumulativeTemporality, aggForCumulativeTemporality, start)
|
||||
sb.SelectMore(rateExpr)
|
||||
case metrictypes.TimeAggregationIncrease:
|
||||
increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality,
|
||||
aggForDeltaTemporality,
|
||||
aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
|
||||
aggForCumulativeTemporality, aggForCumulativeTemporality,
|
||||
)
|
||||
increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, start)
|
||||
sb.SelectMore(increaseExpr)
|
||||
default:
|
||||
expr := fmt.Sprintf(OthersMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality)
|
||||
@@ -519,14 +592,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
|
||||
_ uint64,
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
|
||||
_ map[string][]*telemetrytypes.TelemetryFieldKey,
|
||||
) (string, []any, error) {
|
||||
if query.Aggregations[0].SpaceAggregation.IsZero() {
|
||||
return "", nil, errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`]",
|
||||
)
|
||||
}
|
||||
) (string, []any) {
|
||||
sb := sqlbuilder.NewSelectBuilder()
|
||||
|
||||
sb.Select("ts")
|
||||
@@ -543,7 +609,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
|
||||
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
|
||||
|
||||
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
|
||||
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
|
||||
}
|
||||
|
||||
func (b *MetricQueryStatementBuilder) BuildFinalSelect(
|
||||
@@ -575,7 +641,9 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
|
||||
quantile,
|
||||
))
|
||||
sb.From("__spatial_aggregation_cte")
|
||||
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
|
||||
for _, g := range query.GroupBy {
|
||||
sb.GroupBy(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
|
||||
}
|
||||
sb.GroupBy("ts")
|
||||
if query.Having != nil && query.Having.Expression != "" {
|
||||
rewriter := querybuilder.NewHavingExpressionRewriter()
|
||||
@@ -591,8 +659,6 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
|
||||
sb.Where(rewrittenExpr)
|
||||
}
|
||||
}
|
||||
sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
|
||||
sb.OrderBy("ts")
|
||||
|
||||
q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||
return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
|
||||
|
||||
@@ -3,7 +3,6 @@ package telemetrymetrics
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
)
|
||||
|
||||
@@ -169,7 +168,7 @@ func AggregationColumnForSamplesTable(
|
||||
temporality metrictypes.Temporality,
|
||||
timeAggregation metrictypes.TimeAggregation,
|
||||
tableHints *metrictypes.MetricTableHints,
|
||||
) (string, error) {
|
||||
) string {
|
||||
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
|
||||
var aggregationColumn string
|
||||
switch temporality {
|
||||
@@ -299,12 +298,5 @@ func AggregationColumnForSamplesTable(
|
||||
}
|
||||
}
|
||||
}
|
||||
if aggregationColumn == "" {
|
||||
return "", errors.Newf(
|
||||
errors.TypeInvalidInput,
|
||||
errors.CodeInvalidInput,
|
||||
"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
|
||||
)
|
||||
}
|
||||
return aggregationColumn, nil
|
||||
return aggregationColumn
|
||||
}
|
||||
|
||||
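
The hunk above drops the error return from AggregationColumnForSamplesTable; a rough sketch of how call sites simplify (argument names abbreviated, not the exact code from this diff):

// Before: the column lookup could fail on an unknown time aggregation.
//   col, err := AggregationColumnForSamplesTable(start, end, metricType, temporality, timeAgg, hints)
//   if err != nil {
//       return "", nil, err
//   }
// After: the lookup always returns a column name; invalid time aggregations are
// now rejected up front by the validateMetricAggregation check added later in this diff.
//   col := AggregationColumnForSamplesTable(start, end, metricType, temporality, timeAgg, hints)
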
@@ -35,7 +35,13 @@ func (c *conditionBuilder) conditionFor(
|
||||
sb *sqlbuilder.SelectBuilder,
|
||||
) (string, error) {
|
||||
|
||||
if operator.IsStringSearchOperator() {
|
||||
switch operator {
|
||||
case qbtypes.FilterOperatorContains,
|
||||
qbtypes.FilterOperatorNotContains,
|
||||
qbtypes.FilterOperatorILike,
|
||||
qbtypes.FilterOperatorNotILike,
|
||||
qbtypes.FilterOperatorLike,
|
||||
qbtypes.FilterOperatorNotLike:
|
||||
value = querybuilder.FormatValueForContains(value)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,102 +0,0 @@
|
||||
package transition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
// ConvertV5TimeSeriesDataToV4Result converts v5 TimeSeriesData to v4 Result
|
||||
func ConvertV5TimeSeriesDataToV4Result(v5Data *qbtypes.TimeSeriesData) *v3.Result {
|
||||
if v5Data == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := &v3.Result{
|
||||
QueryName: v5Data.QueryName,
|
||||
Series: make([]*v3.Series, 0),
|
||||
}
|
||||
|
||||
toV4Series := func(ts *qbtypes.TimeSeries) *v3.Series {
|
||||
series := &v3.Series{
|
||||
Labels: make(map[string]string),
|
||||
LabelsArray: make([]map[string]string, 0),
|
||||
Points: make([]v3.Point, 0, len(ts.Values)),
|
||||
}
|
||||
|
||||
for _, label := range ts.Labels {
|
||||
valueStr := fmt.Sprintf("%v", label.Value)
|
||||
series.Labels[label.Key.Name] = valueStr
|
||||
}
|
||||
|
||||
if len(series.Labels) > 0 {
|
||||
series.LabelsArray = append(series.LabelsArray, series.Labels)
|
||||
}
|
||||
|
||||
for _, tsValue := range ts.Values {
|
||||
if tsValue.Partial {
|
||||
continue
|
||||
}
|
||||
|
||||
point := v3.Point{
|
||||
Timestamp: tsValue.Timestamp,
|
||||
Value: tsValue.Value,
|
||||
}
|
||||
series.Points = append(series.Points, point)
|
||||
}
|
||||
return series
|
||||
}
|
||||
|
||||
for _, aggBucket := range v5Data.Aggregations {
|
||||
for _, ts := range aggBucket.Series {
|
||||
result.Series = append(result.Series, toV4Series(ts))
|
||||
}
|
||||
|
||||
if len(aggBucket.AnomalyScores) != 0 {
|
||||
result.AnomalyScores = make([]*v3.Series, 0)
|
||||
for _, ts := range aggBucket.AnomalyScores {
|
||||
result.AnomalyScores = append(result.AnomalyScores, toV4Series(ts))
|
||||
}
|
||||
}
|
||||
|
||||
if len(aggBucket.PredictedSeries) != 0 {
|
||||
result.PredictedSeries = make([]*v3.Series, 0)
|
||||
for _, ts := range aggBucket.PredictedSeries {
|
||||
result.PredictedSeries = append(result.PredictedSeries, toV4Series(ts))
|
||||
}
|
||||
}
|
||||
|
||||
if len(aggBucket.LowerBoundSeries) != 0 {
|
||||
result.LowerBoundSeries = make([]*v3.Series, 0)
|
||||
for _, ts := range aggBucket.LowerBoundSeries {
|
||||
result.LowerBoundSeries = append(result.LowerBoundSeries, toV4Series(ts))
|
||||
}
|
||||
}
|
||||
|
||||
if len(aggBucket.UpperBoundSeries) != 0 {
|
||||
result.UpperBoundSeries = make([]*v3.Series, 0)
|
||||
for _, ts := range aggBucket.UpperBoundSeries {
|
||||
result.UpperBoundSeries = append(result.UpperBoundSeries, toV4Series(ts))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// ConvertV5TimeSeriesDataSliceToV4Results converts a slice of v5 TimeSeriesData to v4 QueryRangeResponse
|
||||
func ConvertV5TimeSeriesDataSliceToV4Results(v5DataSlice []*qbtypes.TimeSeriesData) *v3.QueryRangeResponse {
|
||||
response := &v3.QueryRangeResponse{
|
||||
ResultType: "matrix", // Time series data is typically "matrix" type
|
||||
Result: make([]*v3.Result, 0, len(v5DataSlice)),
|
||||
}
|
||||
|
||||
for _, v5Data := range v5DataSlice {
|
||||
if result := ConvertV5TimeSeriesDataToV4Result(v5Data); result != nil {
|
||||
response.Result = append(response.Result, result)
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
@@ -152,9 +152,7 @@ func (f FilterOperator) IsStringSearchOperator() bool {
FilterOperatorILike,
FilterOperatorNotILike,
FilterOperatorLike,
FilterOperatorNotLike,
FilterOperatorRegexp,
FilterOperatorNotRegexp:
FilterOperatorNotLike:
return true
default:
return false

@@ -3,7 +3,6 @@ package querybuildertypesv5
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
@@ -175,54 +174,3 @@ func (q *QueryBuilderQuery[T]) Normalize() {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Fast-path (no fingerprint grouping)
// CanShortCircuitDelta returns true if we can use the optimized query
// for the given query.
// This is used to avoid the group by fingerprint, thus improving the performance
// for certain queries.
// Cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
// - rate = sum(value)/step, increase = sum(value) - sum of sums is the same as sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
// - sum of sums is the same as sum of all values
//
// 3. time aggregation = min and space aggregation = min
// - min of mins is the same as min of all values
//
// 4. time aggregation = max and space aggregation = max
// - max of maxes is the same as max of all values
//
// 5. special case exphist: there is no need for per series/fingerprint aggregation,
// we can directly use the quantilesDDMerge function
//
// All of this is true only for delta metrics.
func CanShortCircuitDelta(metricAgg MetricAggregation) bool {

if metricAgg.Temporality != metrictypes.Delta {
return false
}

ta := metricAgg.TimeAggregation
sa := metricAgg.SpaceAggregation

if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) &&
sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
return true
}
if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
return true
}
if metricAgg.Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
return true
}

return false
}
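
A tiny self-contained check, with made-up series names, of the "sum of sums is the same as the sum of all values" property that the comment above relies on to skip the per-fingerprint GROUP BY:

package main

import "fmt"

func main() {
	// Summing per-series sums gives the same result as summing every value once,
	// so delta sum/rate/increase can aggregate raw samples directly.
	perSeries := map[string][]float64{"series-1": {1, 2}, "series-2": {3, 4}}
	sumOfSums, flatSum := 0.0, 0.0
	for _, vals := range perSeries {
		s := 0.0
		for _, v := range vals {
			s += v
			flatSum += v
		}
		sumOfSums += s
	}
	fmt.Println(sumOfSums, flatSum) // 10 10
}
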
@@ -12,7 +12,6 @@ import (
|
||||
var (
|
||||
ErrColumnNotFound = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "field not found")
|
||||
ErrBetweenValues = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
|
||||
ErrBetweenValuesType = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values of the number type")
|
||||
ErrInValues = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
|
||||
ErrUnsupportedOperator = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported operator")
|
||||
)
|
||||
|
||||
@@ -76,6 +76,33 @@ type TimeSeries struct {
Values []*TimeSeriesValue `json:"values"`
}

// LabelsMap converts the label slice to a map[string]string for use in
// alert evaluation helpers that operate on flat label maps.
func (ts *TimeSeries) LabelsMap() map[string]string {
if ts == nil {
return nil
}
m := make(map[string]string, len(ts.Labels))
for _, l := range ts.Labels {
m[l.Key.Name] = fmt.Sprintf("%v", l.Value)
}
return m
}

// NonPartialValues returns only the values where Partial is false.
func (ts *TimeSeries) NonPartialValues() []*TimeSeriesValue {
if ts == nil {
return nil
}
result := make([]*TimeSeriesValue, 0, len(ts.Values))
for _, v := range ts.Values {
if !v.Partial {
result = append(result, v)
}
}
return result
}

type Label struct {
Key telemetrytypes.TelemetryFieldKey `json:"key"`
Value any `json:"value"`
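
A minimal sketch of how the two new helpers are meant to be combined during alert evaluation; aboveAtLeastOnce is a hypothetical function written alongside TimeSeries, not part of this diff:

// Evaluate a simple "above target at least once" check over non-partial points,
// returning the flattened labels for the resulting alert sample.
func aboveAtLeastOnce(ts *TimeSeries, target float64) (map[string]string, bool) {
	for _, v := range ts.NonPartialValues() { // partial buckets are skipped
		if v.Value > target {
			return ts.LabelsMap(), true // e.g. {"service": "api"}
		}
	}
	return ts.LabelsMap(), false
}
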
@@ -179,6 +179,14 @@ func (q *QueryBuilderQuery[T]) validateAggregations() error {
|
||||
aggId,
|
||||
)
|
||||
}
|
||||
// Validate metric-specific aggregations
|
||||
if err := validateMetricAggregation(v); err != nil {
|
||||
aggId := fmt.Sprintf("aggregation #%d", i+1)
|
||||
if q.Name != "" {
|
||||
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
|
||||
}
|
||||
return wrapValidationError(err, aggId, "invalid metric %s: %s")
|
||||
}
|
||||
case TraceAggregation:
|
||||
if v.Expression == "" {
|
||||
aggId := fmt.Sprintf("aggregation #%d", i+1)
|
||||
@@ -795,3 +803,85 @@ func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) erro
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// validateMetricAggregation validates metric-specific aggregation parameters
|
||||
func validateMetricAggregation(agg MetricAggregation) error {
|
||||
// we can't decide anything here without known temporality
|
||||
if agg.Temporality == metrictypes.Unknown {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate that rate/increase are only used with appropriate temporalities
|
||||
if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
|
||||
// For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
|
||||
if agg.Temporality == metrictypes.Unspecified {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate percentile aggregations are only used with histogram types
|
||||
if agg.SpaceAggregation.IsPercentile() {
|
||||
if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"percentile aggregation can only be used with histogram or summary metric types",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate time aggregation values
|
||||
validTimeAggregations := []metrictypes.TimeAggregation{
|
||||
metrictypes.TimeAggregationUnspecified,
|
||||
metrictypes.TimeAggregationLatest,
|
||||
metrictypes.TimeAggregationSum,
|
||||
metrictypes.TimeAggregationAvg,
|
||||
metrictypes.TimeAggregationMin,
|
||||
metrictypes.TimeAggregationMax,
|
||||
metrictypes.TimeAggregationCount,
|
||||
metrictypes.TimeAggregationCountDistinct,
|
||||
metrictypes.TimeAggregationRate,
|
||||
metrictypes.TimeAggregationIncrease,
|
||||
}
|
||||
|
||||
validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
|
||||
if !validTimeAgg {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid time aggregation: %s",
|
||||
agg.TimeAggregation.StringValue(),
|
||||
).WithAdditional(
|
||||
"Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
|
||||
)
|
||||
}
|
||||
|
||||
// Validate space aggregation values
|
||||
validSpaceAggregations := []metrictypes.SpaceAggregation{
|
||||
metrictypes.SpaceAggregationUnspecified,
|
||||
metrictypes.SpaceAggregationSum,
|
||||
metrictypes.SpaceAggregationAvg,
|
||||
metrictypes.SpaceAggregationMin,
|
||||
metrictypes.SpaceAggregationMax,
|
||||
metrictypes.SpaceAggregationCount,
|
||||
metrictypes.SpaceAggregationPercentile50,
|
||||
metrictypes.SpaceAggregationPercentile75,
|
||||
metrictypes.SpaceAggregationPercentile90,
|
||||
metrictypes.SpaceAggregationPercentile95,
|
||||
metrictypes.SpaceAggregationPercentile99,
|
||||
}
|
||||
|
||||
validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
|
||||
if !validSpaceAgg {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid space aggregation: %s",
|
||||
agg.SpaceAggregation.StringValue(),
|
||||
).WithAdditional(
|
||||
"Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
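
A short, hypothetical example (same package assumed) of an aggregation the new validateMetricAggregation check rejects; the same aggregation with Delta or Cumulative temporality would pass:

// A rate over a gauge metric (Unspecified temporality) is invalid.
func exampleInvalidGaugeRate() error {
	agg := MetricAggregation{
		Temporality:      metrictypes.Unspecified,
		TimeAggregation:  metrictypes.TimeAggregationRate,
		SpaceAggregation: metrictypes.SpaceAggregationSum,
	}
	// Returns: "rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)"
	return validateMetricAggregation(agg)
}
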
@@ -203,11 +203,11 @@ func (rc *RuleCondition) IsValid() bool {
}

// ShouldEval checks whether the series should be evaluated at all for alerts.
func (rc *RuleCondition) ShouldEval(series *v3.Series) bool {
func (rc *RuleCondition) ShouldEval(series *qbtypes.TimeSeries) bool {
if rc == nil {
return true
}
return !rc.RequireMinPoints || len(series.Points) >= rc.RequiredNumPoints
return !rc.RequireMinPoints || len(series.NonPartialValues()) >= rc.RequiredNumPoints
}

// QueryType is a shorthand method to get query type

@@ -355,6 +355,14 @@ func (r *PostableRule) validate() error {
|
||||
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
|
||||
}
|
||||
|
||||
if r.Version != "" && r.Version != "v5" {
|
||||
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
|
||||
}
|
||||
|
||||
if r.Version == "v5" && r.RuleCondition.CompositeQuery != nil && len(r.RuleCondition.CompositeQuery.Queries) == 0 {
|
||||
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "queries envelope is required in compositeQuery"))
|
||||
}
|
||||
|
||||
if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
|
||||
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
func TestIsAllQueriesDisabled(t *testing.T) {
|
||||
@@ -621,9 +623,9 @@ func TestParseIntoRuleThresholdGeneration(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test that threshold can evaluate properly
|
||||
vector, err := threshold.Eval(v3.Series{
|
||||
Points: []v3.Point{{Value: 0.15, Timestamp: 1000}}, // 150ms in seconds
|
||||
Labels: map[string]string{"test": "label"},
|
||||
vector, err := threshold.Eval(qbtypes.TimeSeries{
|
||||
Values: []*qbtypes.TimeSeriesValue{{Value: 0.15, Timestamp: 1000}}, // 150ms in seconds
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "test"}, Value: "label"}},
|
||||
}, "", EvalData{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error in shouldAlert: %v", err)
|
||||
@@ -698,9 +700,9 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test with a value that should trigger both WARNING and CRITICAL thresholds
|
||||
vector, err := threshold.Eval(v3.Series{
|
||||
Points: []v3.Point{{Value: 95.0, Timestamp: 1000}}, // 95% CPU usage
|
||||
Labels: map[string]string{"service": "test"},
|
||||
vector, err := threshold.Eval(qbtypes.TimeSeries{
|
||||
Values: []*qbtypes.TimeSeriesValue{{Value: 95.0, Timestamp: 1000}}, // 95% CPU usage
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "test"}},
|
||||
}, "", EvalData{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error in shouldAlert: %v", err)
|
||||
@@ -708,9 +710,9 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 2, len(vector))
|
||||
|
||||
vector, err = threshold.Eval(v3.Series{
|
||||
Points: []v3.Point{{Value: 75.0, Timestamp: 1000}}, // 75% CPU usage
|
||||
Labels: map[string]string{"service": "test"},
|
||||
vector, err = threshold.Eval(qbtypes.TimeSeries{
|
||||
Values: []*qbtypes.TimeSeriesValue{{Value: 75.0, Timestamp: 1000}}, // 75% CPU usage
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "test"}},
|
||||
}, "", EvalData{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error in shouldAlert: %v", err)
|
||||
@@ -723,7 +725,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ruleJSON []byte
|
||||
series v3.Series
|
||||
series qbtypes.TimeSeries
|
||||
shouldAlert bool
|
||||
expectedValue float64
|
||||
}{
|
||||
@@ -751,9 +753,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`),
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: -2.1}, // below & at least once, should alert
|
||||
{Timestamp: 2000, Value: -2.3},
|
||||
},
|
||||
@@ -785,9 +787,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`), // below & at least once, no value below -2.0
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: -1.9},
|
||||
{Timestamp: 2000, Value: -1.8},
|
||||
},
|
||||
@@ -818,9 +820,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`), // above & at least once, should alert
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 2.1}, // above 2.0, should alert
|
||||
{Timestamp: 2000, Value: 2.2},
|
||||
},
|
||||
@@ -852,9 +854,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`),
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 1.1},
|
||||
{Timestamp: 2000, Value: 1.2},
|
||||
},
|
||||
@@ -885,9 +887,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`), // below and all the times
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: -2.1}, // all below -2
|
||||
{Timestamp: 2000, Value: -2.2},
|
||||
{Timestamp: 3000, Value: -2.5},
|
||||
@@ -920,9 +922,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`),
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: -3.0},
|
||||
{Timestamp: 2000, Value: -1.0}, // above -2, breaks condition
|
||||
{Timestamp: 3000, Value: -2.5},
|
||||
@@ -954,10 +956,10 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`),
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
{Timestamp: 1000, Value: -8.0}, // abs(−8) >= 7, alert
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: -8.0}, // abs(-8) >= 7, alert
|
||||
{Timestamp: 2000, Value: 5.0},
|
||||
},
|
||||
},
|
||||
@@ -988,9 +990,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`),
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 80.0}, // below 90, should alert
|
||||
{Timestamp: 2000, Value: 85.0},
|
||||
},
|
||||
@@ -1022,9 +1024,9 @@ func TestAnomalyNegationEval(t *testing.T) {
|
||||
"selectedQuery": "A"
|
||||
}
|
||||
}`),
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"host": "server1"},
|
||||
Points: []v3.Point{
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{{Key: telemetrytypes.TelemetryFieldKey{Name: "host"}, Value: "server1"}},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
{Timestamp: 1000, Value: 60.0}, // below, should alert
|
||||
{Timestamp: 2000, Value: 90.0},
|
||||
},
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/converter"
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
)
|
||||
|
||||
@@ -83,7 +83,7 @@ func (eval EvalData) HasActiveAlert(sampleLabelFp uint64) bool {
|
||||
type RuleThreshold interface {
|
||||
// Eval runs the given series through the threshold rules
|
||||
// using the given EvalData and returns the matching series
|
||||
Eval(series v3.Series, unit string, evalData EvalData) (Vector, error)
|
||||
Eval(series qbtypes.TimeSeries, unit string, evalData EvalData) (Vector, error)
|
||||
GetRuleReceivers() []RuleReceivers
|
||||
}
|
||||
|
||||
@@ -122,10 +122,11 @@ func (r BasicRuleThresholds) Validate() error {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (r BasicRuleThresholds) Eval(series v3.Series, unit string, evalData EvalData) (Vector, error) {
|
||||
func (r BasicRuleThresholds) Eval(series qbtypes.TimeSeries, unit string, evalData EvalData) (Vector, error) {
|
||||
var resultVector Vector
|
||||
thresholds := []BasicRuleThreshold(r)
|
||||
sortThresholds(thresholds)
|
||||
seriesLabels := series.LabelsMap()
|
||||
for _, threshold := range thresholds {
|
||||
smpl, shouldAlert := threshold.shouldAlert(series, unit)
|
||||
if shouldAlert {
|
||||
@@ -137,15 +138,15 @@ func (r BasicRuleThresholds) Eval(series v3.Series, unit string, evalData EvalDa
|
||||
resultVector = append(resultVector, smpl)
|
||||
continue
|
||||
} else if evalData.SendUnmatched {
|
||||
// Sanitise the series points to remove any NaN or Inf values
|
||||
series.Points = removeGroupinSetPoints(series)
|
||||
if len(series.Points) == 0 {
|
||||
// Sanitise the series values to remove any NaN, Inf, or partial values
|
||||
values := filterValidValues(series.Values)
|
||||
if len(values) == 0 {
|
||||
continue
|
||||
}
|
||||
// prepare the sample with the first point of the series
|
||||
// prepare the sample with the first value of the series
|
||||
smpl := Sample{
|
||||
Point: Point{T: series.Points[0].Timestamp, V: series.Points[0].Value},
|
||||
Metric: PrepareSampleLabelsForRule(series.Labels, threshold.Name),
|
||||
Point: Point{T: values[0].Timestamp, V: values[0].Value},
|
||||
Metric: PrepareSampleLabelsForRule(seriesLabels, threshold.Name),
|
||||
Target: *threshold.TargetValue,
|
||||
TargetUnit: threshold.TargetUnit,
|
||||
}
|
||||
@@ -160,7 +161,7 @@ func (r BasicRuleThresholds) Eval(series v3.Series, unit string, evalData EvalDa
|
||||
if threshold.RecoveryTarget == nil {
|
||||
continue
|
||||
}
|
||||
sampleLabels := PrepareSampleLabelsForRule(series.Labels, threshold.Name)
|
||||
sampleLabels := PrepareSampleLabelsForRule(seriesLabels, threshold.Name)
|
||||
alertHash := sampleLabels.Hash()
|
||||
// check if alert is active and then check if recovery threshold matches
|
||||
if evalData.HasActiveAlert(alertHash) {
|
||||
@@ -255,18 +256,23 @@ func (b BasicRuleThreshold) Validate() error {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (b BasicRuleThreshold) matchesRecoveryThreshold(series v3.Series, ruleUnit string) (Sample, bool) {
|
||||
func (b BasicRuleThreshold) matchesRecoveryThreshold(series qbtypes.TimeSeries, ruleUnit string) (Sample, bool) {
|
||||
return b.shouldAlertWithTarget(series, b.recoveryTarget(ruleUnit))
|
||||
}
|
||||
func (b BasicRuleThreshold) shouldAlert(series v3.Series, ruleUnit string) (Sample, bool) {
|
||||
func (b BasicRuleThreshold) shouldAlert(series qbtypes.TimeSeries, ruleUnit string) (Sample, bool) {
|
||||
return b.shouldAlertWithTarget(series, b.target(ruleUnit))
|
||||
}
|
||||
|
||||
func removeGroupinSetPoints(series v3.Series) []v3.Point {
|
||||
var result []v3.Point
|
||||
for _, s := range series.Points {
|
||||
if s.Timestamp >= 0 && !math.IsNaN(s.Value) && !math.IsInf(s.Value, 0) {
|
||||
result = append(result, s)
|
||||
// filterValidValues returns only the values that are valid for alert evaluation:
|
||||
// non-partial, non-NaN, non-Inf, and with non-negative timestamps.
|
||||
func filterValidValues(values []*qbtypes.TimeSeriesValue) []*qbtypes.TimeSeriesValue {
|
||||
var result []*qbtypes.TimeSeriesValue
|
||||
for _, v := range values {
|
||||
if v.Partial {
|
||||
continue
|
||||
}
|
||||
if v.Timestamp >= 0 && !math.IsNaN(v.Value) && !math.IsInf(v.Value, 0) {
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
return result
|
||||
@@ -284,15 +290,15 @@ func PrepareSampleLabelsForRule(seriesLabels map[string]string, thresholdName st
|
||||
return lb.Labels()
|
||||
}
|
||||
|
||||
func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float64) (Sample, bool) {
|
||||
func (b BasicRuleThreshold) shouldAlertWithTarget(series qbtypes.TimeSeries, target float64) (Sample, bool) {
|
||||
var shouldAlert bool
|
||||
var alertSmpl Sample
|
||||
lbls := PrepareSampleLabelsForRule(series.Labels, b.Name)
|
||||
lbls := PrepareSampleLabelsForRule(series.LabelsMap(), b.Name)
|
||||
|
||||
series.Points = removeGroupinSetPoints(series)
|
||||
values := filterValidValues(series.Values)
|
||||
|
||||
// nothing to evaluate
|
||||
if len(series.Points) == 0 {
|
||||
if len(values) == 0 {
|
||||
return alertSmpl, false
|
||||
}
|
||||
|
||||
@@ -300,7 +306,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
case AtleastOnce:
|
||||
// If any sample matches the condition, the rule is firing.
|
||||
if b.CompareOp == ValueIsAbove {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value > target {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
shouldAlert = true
|
||||
@@ -308,7 +314,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
}
|
||||
} else if b.CompareOp == ValueIsBelow {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value < target {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
shouldAlert = true
|
||||
@@ -316,7 +322,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
}
|
||||
} else if b.CompareOp == ValueIsEq {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value == target {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
shouldAlert = true
|
||||
@@ -324,7 +330,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
}
|
||||
} else if b.CompareOp == ValueIsNotEq {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value != target {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
shouldAlert = true
|
||||
@@ -332,7 +338,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
}
|
||||
} else if b.CompareOp == ValueOutsideBounds {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if math.Abs(smpl.Value) >= target {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
shouldAlert = true
|
||||
@@ -345,7 +351,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
shouldAlert = true
|
||||
alertSmpl = Sample{Point: Point{V: target}, Metric: lbls}
|
||||
if b.CompareOp == ValueIsAbove {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value <= target {
|
||||
shouldAlert = false
|
||||
break
|
||||
@@ -354,7 +360,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
// use min value from the series
|
||||
if shouldAlert {
|
||||
var minValue float64 = math.Inf(1)
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value < minValue {
|
||||
minValue = smpl.Value
|
||||
}
|
||||
@@ -362,7 +368,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
alertSmpl = Sample{Point: Point{V: minValue}, Metric: lbls}
|
||||
}
|
||||
} else if b.CompareOp == ValueIsBelow {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value >= target {
|
||||
shouldAlert = false
|
||||
break
|
||||
@@ -370,7 +376,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
if shouldAlert {
|
||||
var maxValue float64 = math.Inf(-1)
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value > maxValue {
|
||||
maxValue = smpl.Value
|
||||
}
|
||||
@@ -378,14 +384,14 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
alertSmpl = Sample{Point: Point{V: maxValue}, Metric: lbls}
|
||||
}
|
||||
} else if b.CompareOp == ValueIsEq {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value != target {
|
||||
shouldAlert = false
|
||||
break
|
||||
}
|
||||
}
|
||||
} else if b.CompareOp == ValueIsNotEq {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if smpl.Value == target {
|
||||
shouldAlert = false
|
||||
break
|
||||
@@ -393,7 +399,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
// use any non-inf or nan value from the series
|
||||
if shouldAlert {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if !math.IsInf(smpl.Value, 0) && !math.IsNaN(smpl.Value) {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
break
|
||||
@@ -401,7 +407,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
}
|
||||
}
|
||||
} else if b.CompareOp == ValueOutsideBounds {
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if math.Abs(smpl.Value) < target {
|
||||
alertSmpl = Sample{Point: Point{V: smpl.Value}, Metric: lbls}
|
||||
shouldAlert = false
|
||||
@@ -412,7 +418,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
case OnAverage:
|
||||
// If the average of all samples matches the condition, the rule is firing.
|
||||
var sum, count float64
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if math.IsNaN(smpl.Value) || math.IsInf(smpl.Value, 0) {
|
||||
continue
|
||||
}
|
||||
@@ -446,7 +452,7 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
// If the sum of all samples matches the condition, the rule is firing.
|
||||
var sum float64
|
||||
|
||||
for _, smpl := range series.Points {
|
||||
for _, smpl := range values {
|
||||
if math.IsNaN(smpl.Value) || math.IsInf(smpl.Value, 0) {
|
||||
continue
|
||||
}
|
||||
@@ -477,21 +483,22 @@ func (b BasicRuleThreshold) shouldAlertWithTarget(series v3.Series, target float
|
||||
case Last:
|
||||
// If the last sample matches the condition, the rule is firing.
|
||||
shouldAlert = false
|
||||
alertSmpl = Sample{Point: Point{V: series.Points[len(series.Points)-1].Value}, Metric: lbls}
|
||||
lastValue := values[len(values)-1].Value
|
||||
alertSmpl = Sample{Point: Point{V: lastValue}, Metric: lbls}
|
||||
if b.CompareOp == ValueIsAbove {
|
||||
if series.Points[len(series.Points)-1].Value > target {
|
||||
if lastValue > target {
|
||||
shouldAlert = true
|
||||
}
|
||||
} else if b.CompareOp == ValueIsBelow {
|
||||
if series.Points[len(series.Points)-1].Value < target {
|
||||
if lastValue < target {
|
||||
shouldAlert = true
|
||||
}
|
||||
} else if b.CompareOp == ValueIsEq {
|
||||
if series.Points[len(series.Points)-1].Value == target {
|
||||
if lastValue == target {
|
||||
shouldAlert = true
|
||||
}
|
||||
} else if b.CompareOp == ValueIsNotEq {
|
||||
if series.Points[len(series.Points)-1].Value != target {
|
||||
if lastValue != target {
|
||||
shouldAlert = true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,16 +6,24 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
target := 100.0
|
||||
|
||||
makeLabel := func(name, value string) *qbtypes.Label {
|
||||
return &qbtypes.Label{Key: telemetrytypes.TelemetryFieldKey{Name: name}, Value: value}
|
||||
}
|
||||
makeValue := func(value float64, ts int64) *qbtypes.TimeSeriesValue {
|
||||
return &qbtypes.TimeSeriesValue{Value: value, Timestamp: ts}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
threshold BasicRuleThreshold
|
||||
series v3.Series
|
||||
series qbtypes.TimeSeries
|
||||
ruleUnit string
|
||||
shouldAlert bool
|
||||
}{
|
||||
@@ -28,11 +36,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.15, Timestamp: 1000}, // 150ms in seconds
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(0.15, 1000)}, // 150ms in seconds
|
||||
},
|
||||
ruleUnit: "s",
|
||||
shouldAlert: true,
|
||||
@@ -46,11 +52,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.05, Timestamp: 1000}, // 50ms in seconds
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(0.05, 1000)}, // 50ms in seconds
|
||||
},
|
||||
ruleUnit: "s",
|
||||
shouldAlert: false,
|
||||
@@ -64,11 +68,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 150000, Timestamp: 1000}, // 150000ms = 150s
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(150000, 1000)}, // 150000ms = 150s
|
||||
},
|
||||
ruleUnit: "ms",
|
||||
shouldAlert: true,
|
||||
@@ -83,11 +85,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.15, Timestamp: 1000}, // 0.15KiB ≈ 153.6 bytes
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(0.15, 1000)}, // 0.15KiB ≈ 153.6 bytes
|
||||
},
|
||||
ruleUnit: "kbytes",
|
||||
shouldAlert: true,
|
||||
@@ -101,11 +101,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.15, Timestamp: 1000},
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(0.15, 1000)},
|
||||
},
|
||||
ruleUnit: "mbytes",
|
||||
shouldAlert: true,
|
||||
@@ -120,11 +118,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsBelow,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.05, Timestamp: 1000}, // 50ms in seconds
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(0.05, 1000)}, // 50ms in seconds
|
||||
},
|
||||
ruleUnit: "s",
|
||||
shouldAlert: true,
|
||||
@@ -138,12 +134,12 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: OnAverage,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.08, Timestamp: 1000}, // 80ms
|
||||
{Value: 0.12, Timestamp: 2000}, // 120ms
|
||||
{Value: 0.15, Timestamp: 3000}, // 150ms
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
makeValue(0.08, 1000), // 80ms
|
||||
makeValue(0.12, 2000), // 120ms
|
||||
makeValue(0.15, 3000), // 150ms
|
||||
},
|
||||
},
|
||||
ruleUnit: "s",
|
||||
@@ -158,12 +154,12 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: InTotal,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.04, Timestamp: 1000}, // 40MB
|
||||
{Value: 0.05, Timestamp: 2000}, // 50MB
|
||||
{Value: 0.03, Timestamp: 3000}, // 30MB
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
makeValue(0.04, 1000), // 40MB
|
||||
makeValue(0.05, 2000), // 50MB
|
||||
makeValue(0.03, 3000), // 30MB
|
||||
},
|
||||
},
|
||||
ruleUnit: "decgbytes",
|
||||
@@ -178,12 +174,12 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AllTheTimes,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.11, Timestamp: 1000}, // 110ms
|
||||
{Value: 0.12, Timestamp: 2000}, // 120ms
|
||||
{Value: 0.15, Timestamp: 3000}, // 150ms
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
makeValue(0.11, 1000), // 110ms
|
||||
makeValue(0.12, 2000), // 120ms
|
||||
makeValue(0.15, 3000), // 150ms
|
||||
},
|
||||
},
|
||||
ruleUnit: "s",
|
||||
@@ -198,11 +194,11 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: Last,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.15, Timestamp: 1000}, // 150kB
|
||||
{Value: 0.05, Timestamp: 2000}, // 50kB (last value)
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{
|
||||
makeValue(0.15, 1000), // 150kB
|
||||
makeValue(0.05, 2000), // 50kB (last value)
|
||||
},
|
||||
},
|
||||
ruleUnit: "decmbytes",
|
||||
@@ -218,11 +214,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 0.15, Timestamp: 1000},
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(0.15, 1000)},
|
||||
},
|
||||
ruleUnit: "KBs",
|
||||
shouldAlert: true,
|
||||
@@ -237,11 +231,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 150, Timestamp: 1000}, // 150ms
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(150, 1000)}, // 150ms
|
||||
},
|
||||
ruleUnit: "ms",
|
||||
shouldAlert: true,
|
||||
@@ -256,11 +248,9 @@ func TestBasicRuleThresholdEval_UnitConversion(t *testing.T) {
|
||||
MatchType: AtleastOnce,
|
||||
CompareOp: ValueIsAbove,
|
||||
},
|
||||
series: v3.Series{
|
||||
Labels: map[string]string{"service": "test"},
|
||||
Points: []v3.Point{
|
||||
{Value: 150, Timestamp: 1000}, // 150 (unitless)
|
||||
},
|
||||
series: qbtypes.TimeSeries{
|
||||
Labels: []*qbtypes.Label{makeLabel("service", "test")},
|
||||
Values: []*qbtypes.TimeSeriesValue{makeValue(150, 1000)}, // 150 (unitless)
|
||||
},
|
||||
ruleUnit: "",
|
||||
shouldAlert: true,
|
||||
|
||||
@@ -14,10 +14,10 @@ from fixtures.alertutils (
from fixtures.logger import setup_logger
from fixtures.utils import get_testdata_file_path

# Alert test cases use a 30-second wait time to verify expected alert firing.
# Alert data is set up to trigger on the first rule manager evaluation.
# With a 15-second eval frequency for most rules, plus alertmanager's
# group_wait and group_interval delays, alerts should fire well within 30 seconds.
# test cases for match type and compare operators have a wait time of 30 seconds to verify the alert expectation.
# we've positioned the alert data to fire the alert on the first eval of the rule manager; the eval frequency
# for most alert rules is set to 15s, so considering this delay plus some delay from alertmanager's
# group_wait and group_interval, even in the worst case most alerts should be triggered in about 30 seconds
TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_above_at_least_once",
|
||||
@@ -25,7 +25,6 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
alert_data=[
|
||||
types.AlertData(
|
||||
type="metrics",
|
||||
# active requests dummy data
|
||||
data_path="alerts/test_scenarios/threshold_above_at_least_once/alert_data.jsonl",
|
||||
),
|
||||
],
|
||||
@@ -116,28 +115,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
],
|
||||
),
|
||||
),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_above_last",
|
||||
rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
|
||||
alert_data=[
|
||||
types.AlertData(
|
||||
type="metrics",
|
||||
data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
|
||||
),
|
||||
],
|
||||
alert_expectation=types.AlertExpectation(
|
||||
should_alert=True,
|
||||
wait_time_seconds=30,
|
||||
expected_alerts=[
|
||||
types.FiringAlert(
|
||||
labels={
|
||||
"alertname": "threshold_above_last",
|
||||
"threshold.name": "critical",
|
||||
}
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
# TODO: @abhishekhugetech enable the test for matchType last, pylint: disable=W0511
|
||||
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed
|
||||
# types.AlertTestCase(
|
||||
# name="test_threshold_above_last",
|
||||
# rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
|
||||
# alert_data=[
|
||||
# types.AlertData(
|
||||
# type="metrics",
|
||||
# data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
|
||||
# ),
|
||||
# ],
|
||||
# alert_expectation=types.AlertExpectation(
|
||||
# should_alert=True,
|
||||
# wait_time_seconds=30,
|
||||
# expected_alerts=[
|
||||
# types.FiringAlert(
|
||||
# labels={
|
||||
# "alertname": "threshold_above_last",
|
||||
# "threshold.name": "critical",
|
||||
# }
|
||||
# ),
|
||||
# ],
|
||||
# ),
|
||||
# ),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_below_at_least_once",
|
||||
rule_path="alerts/test_scenarios/threshold_below_at_least_once/rule.json",
|
||||
@@ -188,7 +189,6 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
alert_data=[
|
||||
types.AlertData(
|
||||
type="metrics",
|
||||
# one rate ~5 + rest 0.01 so it remains in total below 10
|
||||
data_path="alerts/test_scenarios/threshold_below_in_total/alert_data.jsonl",
|
||||
),
|
||||
],
|
||||
@@ -227,28 +227,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
],
|
||||
),
|
||||
),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_below_last",
|
||||
rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
|
||||
alert_data=[
|
||||
types.AlertData(
|
||||
type="metrics",
|
||||
data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
|
||||
),
|
||||
],
|
||||
alert_expectation=types.AlertExpectation(
|
||||
should_alert=True,
|
||||
wait_time_seconds=30,
|
||||
expected_alerts=[
|
||||
types.FiringAlert(
|
||||
labels={
|
||||
"alertname": "threshold_below_last",
|
||||
"threshold.name": "critical",
|
||||
}
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
# TODO: @abhishekhugetech enable the test for matchType last,
|
||||
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
|
||||
# types.AlertTestCase(
|
||||
# name="test_threshold_below_last",
|
||||
# rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
|
||||
# alert_data=[
|
||||
# types.AlertData(
|
||||
# type="metrics",
|
||||
# data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
|
||||
# ),
|
||||
# ],
|
||||
# alert_expectation=types.AlertExpectation(
|
||||
# should_alert=True,
|
||||
# wait_time_seconds=30,
|
||||
# expected_alerts=[
|
||||
# types.FiringAlert(
|
||||
# labels={
|
||||
# "alertname": "threshold_below_last",
|
||||
# "threshold.name": "critical",
|
||||
# }
|
||||
# ),
|
||||
# ],
|
||||
# ),
|
||||
# ),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_equal_to_at_least_once",
|
||||
rule_path="alerts/test_scenarios/threshold_equal_to_at_least_once/rule.json",
|
||||
@@ -337,28 +339,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
],
|
||||
),
|
||||
),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_equal_to_last",
|
||||
rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
|
||||
alert_data=[
|
||||
types.AlertData(
|
||||
type="metrics",
|
||||
data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
|
||||
),
|
||||
],
|
||||
alert_expectation=types.AlertExpectation(
|
||||
should_alert=True,
|
||||
wait_time_seconds=30,
|
||||
expected_alerts=[
|
||||
types.FiringAlert(
|
||||
labels={
|
||||
"alertname": "threshold_equal_to_last",
|
||||
"threshold.name": "critical",
|
||||
}
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
# TODO: @abhishekhugetech enable the test for matchType last,
|
||||
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
|
||||
# types.AlertTestCase(
|
||||
# name="test_threshold_equal_to_last",
|
||||
# rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
|
||||
# alert_data=[
|
||||
# types.AlertData(
|
||||
# type="metrics",
|
||||
# data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
|
||||
# ),
|
||||
# ],
|
||||
# alert_expectation=types.AlertExpectation(
|
||||
# should_alert=True,
|
||||
# wait_time_seconds=30,
|
||||
# expected_alerts=[
|
||||
# types.FiringAlert(
|
||||
# labels={
|
||||
# "alertname": "threshold_equal_to_last",
|
||||
# "threshold.name": "critical",
|
||||
# }
|
||||
# ),
|
||||
# ],
|
||||
# ),
|
||||
# ),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_not_equal_to_at_least_once",
|
||||
rule_path="alerts/test_scenarios/threshold_not_equal_to_at_least_once/rule.json",
|
||||
@@ -447,28 +451,30 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
|
||||
],
|
||||
),
|
||||
),
|
||||
types.AlertTestCase(
|
||||
name="test_threshold_not_equal_to_last",
|
||||
rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
|
||||
alert_data=[
|
||||
types.AlertData(
|
||||
type="metrics",
|
||||
data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
|
||||
),
|
||||
],
|
||||
alert_expectation=types.AlertExpectation(
|
||||
should_alert=True,
|
||||
wait_time_seconds=30,
|
||||
expected_alerts=[
|
||||
types.FiringAlert(
|
||||
labels={
|
||||
"alertname": "threshold_not_equal_to_last",
|
||||
"threshold.name": "critical",
|
||||
}
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
# TODO: @abhishekhugetech enable the test for matchType last,
|
||||
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
|
||||
# types.AlertTestCase(
|
||||
# name="test_threshold_not_equal_to_last",
|
||||
# rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
|
||||
# alert_data=[
|
||||
# types.AlertData(
|
||||
# type="metrics",
|
||||
# data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
|
||||
# ),
|
||||
# ],
|
||||
# alert_expectation=types.AlertExpectation(
|
||||
# should_alert=True,
|
||||
# wait_time_seconds=30,
|
||||
# expected_alerts=[
|
||||
# types.FiringAlert(
|
||||
# labels={
|
||||
# "alertname": "threshold_not_equal_to_last",
|
||||
# "threshold.name": "critical",
|
||||
# }
|
||||
# ),
|
||||
# ],
|
||||
# ),
|
||||
# ),
|
||||
]
|
||||
|
||||
# test cases unit conversion
|
||||
|
||||
File diff suppressed because it is too large
@@ -54,17 +54,17 @@ def test_rate_with_steady_values_and_reset(
|
||||
|
||||
data = response.json()
|
||||
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
|
||||
assert len(result_values) >= 58
|
||||
assert len(result_values) >= 59
|
||||
# the counter reset happened at 31st minute
|
||||
assert (
|
||||
result_values[29]["value"] == 0.0167
|
||||
result_values[30]["value"] == 0.0167
|
||||
) # i.e 2/120 i.e 29th to 31st minute changes
|
||||
assert (
|
||||
result_values[30]["value"] == 0.133
|
||||
result_values[31]["value"] == 0.133
|
||||
) # i.e 10/60 i.e 31st to 32nd minute changes
|
||||
count_of_steady_rate = sum(1 for v in result_values if v["value"] == 0.0833)
|
||||
assert (
|
||||
count_of_steady_rate >= 55
|
||||
count_of_steady_rate >= 56
|
||||
) # 59 - (1 reset + 1 high rate + 1 at the beginning)
|
||||
# All rates should be non-negative (stale periods = 0 rate)
|
||||
for v in result_values:
|
||||
|
||||
@@ -72,17 +72,16 @@ def test_with_steady_values_and_reset(
|
||||
|
||||
data = response.json()
|
||||
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
|
||||
assert len(result_values) >= 58
|
||||
assert len(result_values) >= 59
|
||||
# the counter reset happened at 31st minute
|
||||
# we skip the rate value for the first data point without previous value
|
||||
assert result_values[29]["value"] == expected_value_at_31st_minute
|
||||
assert result_values[30]["value"] == expected_value_at_32nd_minute
|
||||
assert result_values[30]["value"] == expected_value_at_31st_minute
|
||||
assert result_values[31]["value"] == expected_value_at_32nd_minute
|
||||
assert (
|
||||
result_values[38]["value"] == steady_value
|
||||
) # 38th minute is when cumulative shifts to delta
|
||||
result_values[39]["value"] == steady_value
|
||||
) # 39th minute is when cumulative shifts to delta
|
||||
count_of_steady_rate = sum(1 for v in result_values if v["value"] == steady_value)
|
||||
assert (
|
||||
count_of_steady_rate >= 55
|
||||
count_of_steady_rate >= 56
|
||||
) # 59 - (1 reset + 1 high rate + 1 at the beginning)
|
||||
# All rates should be non-negative (stale periods = 0 rate)
|
||||
for v in result_values:
|
||||
@@ -317,12 +316,12 @@ def test_for_service_with_switch(
|
||||
|
||||
data = response.json()
|
||||
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
|
||||
assert len(result_values) >= 59
|
||||
assert result_values[29]["value"] == expected_value_at_30th_minute # 0.183
|
||||
assert result_values[30]["value"] == expected_value_at_31st_minute # 0.183
|
||||
assert result_values[37]["value"] == value_at_switch # 0.25
|
||||
assert len(result_values) >= 60
|
||||
assert result_values[30]["value"] == expected_value_at_30th_minute # 0.183
|
||||
assert result_values[31]["value"] == expected_value_at_31st_minute # 0.183
|
||||
assert result_values[38]["value"] == value_at_switch # 0.25
|
||||
assert (
|
||||
result_values[38]["value"] == value_at_switch # 0.25
|
||||
result_values[39]["value"] == value_at_switch # 0.25
|
||||
) # 39th minute is when cumulative shifts to delta
|
||||
# All rates should be non-negative (stale periods = 0 rate)
|
||||
for v in result_values:
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
|
||||
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":19,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":38,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
@@ -25,7 +25,7 @@
"type": "clickhouse_sql",
"spec": {
"name": "A",
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= $start_timestamp_ms \n AND unix_milli < $end_timestamp_ms \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n sum(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= {{.start_timestamp_ms}} \n AND unix_milli < {{.end_timestamp_ms}} \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n avg(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
}
}
]
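The query in this hunk aggregates in two stages: a temporal CTE that averages raw points per series per 60-second bucket, followed by a spatial CTE that aggregates the per-series values across series for each bucket. The following is a minimal Python sketch of that shape (a hypothetical helper, not SigNoz code), included only to make the structure of the CTEs easier to follow.

```python
# Illustrative sketch only: the two-stage aggregation performed by the query
# above, restated in plain Python. Stage 1 averages raw points per
# (series fingerprint, minute bucket); stage 2 aggregates the per-series
# averages across series for each bucket.
from collections import defaultdict
from statistics import mean

def two_stage_aggregate(points, spatial=mean):
    """points: iterable of (fingerprint, unix_seconds, value)."""
    temporal = defaultdict(list)                      # (fingerprint, bucket) -> raw values
    for fingerprint, ts, value in points:
        temporal[(fingerprint, ts - ts % 60)].append(value)
    per_series = {key: mean(vals) for key, vals in temporal.items()}
    buckets = defaultdict(list)                       # bucket -> per-series averages
    for (fingerprint, bucket), avg_value in per_series.items():
        buckets[bucket].append(avg_value)
    return {bucket: spatial(vals) for bucket, vals in sorted(buckets.items())}
```

The hunk itself swaps the spatial aggregation from sum(per_series_value) to avg(per_series_value) and switches the time-range placeholders from $start_timestamp_ms / $end_timestamp_ms to the {{.start_timestamp_ms}} / {{.end_timestamp_ms}} template form.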
@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":12,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":23,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":46,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":76,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":34,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":30,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":50,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":55,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":70,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}