Compare commits

..

47 Commits

Author SHA1 Message Date
swapnil-signoz
2d8a00bf18 fix: update error code for service not found 2026-03-20 20:53:33 +05:30
swapnil-signoz
f1b26b310f Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-20 20:51:44 +05:30
swapnil-signoz
d6caa4f2c7 Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-18 14:08:14 +05:30
swapnil-signoz
f86371566d refactor: clean up 2026-03-18 13:45:31 +05:30
swapnil-signoz
9115803084 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-18 13:42:43 +05:30
swapnil-signoz
0c14d8f966 refactor: review comments 2026-03-18 13:40:17 +05:30
swapnil-signoz
7afb461af8 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-18 11:14:33 +05:30
swapnil-signoz
a21fbb4ee0 refactor: clean up 2026-03-18 11:14:05 +05:30
swapnil-signoz
0369842f3d refactor: clean up 2026-03-17 23:40:14 +05:30
swapnil-signoz
59cd96562a Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 23:10:54 +05:30
swapnil-signoz
cc4475cab7 refactor: updating store methods 2026-03-17 23:10:15 +05:30
swapnil-signoz
ac8c648420 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 21:09:47 +05:30
swapnil-signoz
bede6be4b8 feat: adding method for service id creation 2026-03-17 21:09:26 +05:30
swapnil-signoz
dd3d60e6df Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 20:49:31 +05:30
swapnil-signoz
538ab686d2 refactor: using serviceID type 2026-03-17 20:49:17 +05:30
swapnil-signoz
936a325cb9 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-17 17:25:58 +05:30
swapnil-signoz
c6cdcd0143 refactor: renaming service type to service id 2026-03-17 17:25:29 +05:30
swapnil-signoz
cd9211d718 refactor: clean up types 2026-03-17 17:04:27 +05:30
swapnil-signoz
0601c28782 feat: adding integration test 2026-03-17 11:02:46 +05:30
swapnil-signoz
580610dbfa Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-16 23:02:19 +05:30
swapnil-signoz
2d2aa02a81 refactor: split upsert store method 2026-03-16 18:27:42 +05:30
swapnil-signoz
dd9723ad13 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-16 17:42:03 +05:30
swapnil-signoz
3651469416 Merge branch 'main' of https://github.com/SigNoz/signoz into refactor/cloud-integration-types 2026-03-16 17:41:52 +05:30
swapnil-signoz
febce75734 refactor: update Dashboard struct comments and remove unused fields 2026-03-16 17:41:28 +05:30
swapnil-signoz
e1616f3487 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-16 17:36:15 +05:30
swapnil-signoz
4b94287ac7 refactor: add comments for backward compatibility in PostableAgentCheckInRequest 2026-03-16 15:48:20 +05:30
swapnil-signoz
1575c7c54c refactor: streamlining types 2026-03-16 15:39:32 +05:30
swapnil-signoz
8def3f835b refactor: adding comments and removed wrong code 2026-03-16 11:10:53 +05:30
swapnil-signoz
11ed15f4c5 feat: implement cloud integration store 2026-03-14 17:05:02 +05:30
swapnil-signoz
f47877cca9 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-14 17:01:51 +05:30
swapnil-signoz
bb2b9215ba fix: correct GetService signature and remove shadowed Data field 2026-03-14 16:59:07 +05:30
swapnil-signoz
3111904223 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-14 16:36:35 +05:30
swapnil-signoz
003e2c30d8 Merge branch 'main' into refactor/cloud-integration-types 2026-03-14 16:25:35 +05:30
swapnil-signoz
00fe516d10 refactor: update cloud integration types and module interface 2026-03-14 16:25:16 +05:30
swapnil-signoz
0305f4f7db refactor: using struct for map 2026-03-13 16:09:26 +05:30
swapnil-signoz
c60019a6dc Merge branch 'main' into refactor/cloud-integration-types 2026-03-12 23:41:22 +05:30
swapnil-signoz
acde2a37fa feat: adding updated types for cloud integration 2026-03-12 23:40:44 +05:30
swapnil-signoz
945241a52a Merge branch 'main' into refactor/cloud-integration-types 2026-03-12 19:45:50 +05:30
swapnil-signoz
e967f80c86 Merge branch 'main' into refactor/cloud-integration-types 2026-03-02 16:39:42 +05:30
swapnil-signoz
a09dc325de Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-02 16:39:20 +05:30
swapnil-signoz
379b4f7fc4 refactor: removing interface check 2026-03-02 14:50:37 +05:30
swapnil-signoz
5e536ae077 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-02 14:49:35 +05:30
swapnil-signoz
234585e642 Merge branch 'main' into refactor/cloud-integration-types 2026-03-02 14:49:19 +05:30
swapnil-signoz
2cc14f1ad4 Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-02 14:49:00 +05:30
swapnil-signoz
dc4ed4d239 feat: adding sql store implementation 2026-03-02 14:44:56 +05:30
swapnil-signoz
7281c36873 refactor: store interfaces to use local types and error 2026-03-02 13:27:46 +05:30
swapnil-signoz
40288776e8 feat: adding cloud integration type for refactor 2026-02-28 16:59:14 +05:30
123 changed files with 974 additions and 1324 deletions

View File

@@ -35,7 +35,7 @@ linters:
- identical
sloglint:
no-mixed-args: true
attr-only: true
kv-only: true
no-global: all
context: all
static-msg: true

View File

@@ -4,15 +4,12 @@ import (
"context"
"log/slog"
"github.com/spf13/cobra"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
"github.com/SigNoz/signoz/pkg/authz/openfgaschema"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
@@ -31,6 +28,7 @@ import (
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
"github.com/spf13/cobra"
)
func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
@@ -92,37 +90,37 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
},
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create signoz", "error", err)
return err
}
server, err := app.NewServer(config, signoz)
if err != nil {
logger.ErrorContext(ctx, "failed to create server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create server", "error", err)
return err
}
if err := server.Start(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
signoz.Start(ctx)
if err := signoz.Wait(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start signoz", "error", err)
return err
}
err = server.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}
err = signoz.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
return err
}

View File

@@ -5,18 +5,16 @@ import (
"log/slog"
"time"
"github.com/spf13/cobra"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/ee/authn/callbackauthn/oidccallbackauthn"
"github.com/SigNoz/signoz/ee/authn/callbackauthn/samlcallbackauthn"
"github.com/SigNoz/signoz/ee/authz/openfgaauthz"
eequerier "github.com/SigNoz/signoz/ee/querier"
"github.com/SigNoz/signoz/ee/authz/openfgaschema"
"github.com/SigNoz/signoz/ee/gateway/httpgateway"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
eequerier "github.com/SigNoz/signoz/ee/querier"
enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
@@ -25,7 +23,6 @@ import (
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/licensing"
@@ -41,6 +38,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/spf13/cobra"
)
func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
@@ -71,7 +69,7 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
// add enterprise sqlstore factories to the community sqlstore factories
sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory(), sqlstorehook.NewInstrumentationFactory())); err != nil {
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", errors.Attr(err))
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
return err
}
@@ -134,37 +132,37 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create signoz", "error", err)
return err
}
server, err := enterpriseapp.NewServer(config, signoz)
if err != nil {
logger.ErrorContext(ctx, "failed to create server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create server", "error", err)
return err
}
if err := server.Start(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
signoz.Start(ctx)
if err := signoz.Wait(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start signoz", "error", err)
return err
}
err = server.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}
err = signoz.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
return err
}

View File

@@ -4,10 +4,8 @@ import (
"log/slog"
"os"
"github.com/spf13/cobra"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/version"
"github.com/spf13/cobra"
)
var RootCmd = &cobra.Command{
@@ -22,7 +20,7 @@ var RootCmd = &cobra.Command{
func Execute(logger *slog.Logger) {
err := RootCmd.Execute()
if err != nil {
logger.ErrorContext(RootCmd.Context(), "error running command", errors.Attr(err))
logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
os.Exit(1)
}
}

4
conf/cache-config.yml Normal file
View File

@@ -0,0 +1,4 @@
provider: "inmemory"
inmemory:
ttl: 60m
cleanupInterval: 10m

25
conf/prometheus.yml Normal file
View File

@@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- 127.0.0.1:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://localhost:9000/signoz_metrics

View File

@@ -74,37 +74,37 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
instrumentationtypes.CodeFunctionName: "getResults",
})
// TODO(srikanthccv): parallelize this?
p.logger.InfoContext(ctx, "fetching results for current period", slog.Any("anomaly_current_period_query", params.CurrentPeriodQuery))
p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past period", slog.Any("anomaly_past_period_query", params.PastPeriodQuery))
p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for current season", slog.Any("anomaly_current_season_query", params.CurrentSeasonQuery))
p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past season", slog.Any("anomaly_past_season_query", params.PastSeasonQuery))
p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 2 season", slog.Any("anomaly_past_2season_query", params.Past2SeasonQuery))
p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 3 season", slog.Any("anomaly_past_3season_query", params.Past3SeasonQuery))
p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
if err != nil {
return nil, err
@@ -212,17 +212,17 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
p.logger.WarnContext(ctx, "predicted value is less than 0 for series", slog.Float64("anomaly_predicted_value", predictedValue), slog.Any("anomaly_labels", series.Labels))
p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
p.logger.DebugContext(ctx, "predicted value for series",
slog.Float64("anomaly_moving_avg", movingAvg),
slog.Float64("anomaly_avg", avg),
slog.Float64("anomaly_mean", mean),
slog.Any("anomaly_labels", series.Labels),
slog.Float64("anomaly_predicted_value", predictedValue),
slog.Float64("anomaly_curr", curr.Value),
"anomaly_moving_avg", movingAvg,
"anomaly_avg", avg,
"anomaly_mean", mean,
"anomaly_labels", series.Labels,
"anomaly_predicted_value", predictedValue,
"anomaly_curr", curr.Value,
)
predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
@@ -412,7 +412,7 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)
stdDev := p.getStdDev(currentSeasonSeries)
p.logger.InfoContext(ctx, "calculated standard deviation for series", slog.Float64("anomaly_std_dev", stdDev), slog.Any("anomaly_labels", series.Labels))
p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
@@ -420,12 +420,12 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
p.logger.InfoContext(ctx, "calculated mean for series",
slog.Float64("anomaly_prev_series_avg", prevSeriesAvg),
slog.Float64("anomaly_current_season_series_avg", currentSeasonSeriesAvg),
slog.Float64("anomaly_past_season_series_avg", pastSeasonSeriesAvg),
slog.Float64("anomaly_past_2season_series_avg", past2SeasonSeriesAvg),
slog.Float64("anomaly_past_3season_series_avg", past3SeasonSeriesAvg),
slog.Any("anomaly_labels", series.Labels),
"anomaly_prev_series_avg", prevSeriesAvg,
"anomaly_current_season_series_avg", currentSeasonSeriesAvg,
"anomaly_past_season_series_avg", pastSeasonSeriesAvg,
"anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
"anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
"anomaly_labels", series.Labels,
)
predictedSeries := p.getPredictedSeries(

View File

@@ -3,7 +3,6 @@ package oidccallbackauthn
import (
"context"
"fmt"
"log/slog"
"net/url"
"github.com/SigNoz/signoz/pkg/authn"
@@ -151,7 +150,7 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
// Some IDPs return a single group as a string instead of an array
groups = append(groups, g)
default:
a.settings.Logger().WarnContext(ctx, "oidc: unsupported groups type", slog.String("type", fmt.Sprintf("%T", claimValue)))
a.settings.Logger().WarnContext(ctx, "oidc: unsupported groups type", "type", fmt.Sprintf("%T", claimValue))
}
}
}

View File

@@ -3,11 +3,8 @@ package httplicensing
import (
"context"
"encoding/json"
"log/slog"
"time"
"github.com/tidwall/gjson"
"github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/errors"
@@ -19,6 +16,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/tidwall/gjson"
)
type provider struct {
@@ -57,7 +55,7 @@ func (provider *provider) Start(ctx context.Context) error {
err := provider.Validate(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
}
for {
@@ -67,7 +65,7 @@ func (provider *provider) Start(ctx context.Context) error {
case <-tick.C:
err := provider.Validate(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
}
}
}
@@ -135,7 +133,7 @@ func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUI
if errors.Ast(err, errors.TypeNotFound) {
return nil
}
provider.settings.Logger().ErrorContext(ctx, "license validation failed", slog.String("org_id", organizationID.StringValue()))
provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue())
return err
}

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"io"
"net/http"
"runtime/debug"
anomalyV2 "github.com/SigNoz/signoz/ee/anomaly"
"github.com/SigNoz/signoz/pkg/errors"
@@ -53,6 +54,26 @@ func (h *handler) QueryRange(rw http.ResponseWriter, req *http.Request) {
return
}
defer func() {
if r := recover(); r != nil {
stackTrace := string(debug.Stack())
queryJSON, _ := json.Marshal(queryRangeRequest)
h.set.Logger.ErrorContext(ctx, "panic in QueryRange",
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(
errors.CodeInternal,
"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
))
}
}()
if err := queryRangeRequest.Validate(); err != nil {
render.Error(rw, err)
return

View File

@@ -4,15 +4,10 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"time"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"log/slog"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/http/render"
@@ -20,6 +15,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"log/slog"
)
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
@@ -42,7 +38,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
slog.DebugContext(ctx, "fetching license")
license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
if err != nil {
slog.ErrorContext(ctx, "failed to fetch license", signozerrors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch license", "error", err)
} else if license == nil {
slog.DebugContext(ctx, "no active license found")
} else {
@@ -55,7 +51,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
// merge featureSet and zeusFeatures in featureSet with higher priority to zeusFeatures
featureSet = MergeFeatureSets(zeusFeatures, featureSet)
} else {
slog.ErrorContext(ctx, "failed to fetch zeus features", signozerrors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch zeus features", "error", err)
}
}
}

View File

@@ -7,7 +7,6 @@ import (
"net/http"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -36,7 +35,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)
if apiErrorObj != nil {
slog.ErrorContext(r.Context(), "error parsing metric query range params", errors.Attr(apiErrorObj.Err))
slog.ErrorContext(r.Context(), "error parsing metric query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -45,7 +44,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
slog.ErrorContext(r.Context(), "error while adding temporality for metrics", errors.Attr(temporalityErr))
slog.ErrorContext(r.Context(), "error while adding temporality for metrics", "error", temporalityErr)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}

View File

@@ -8,21 +8,16 @@ import (
_ "net/http/pprof" // http profiler
"slices"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"github.com/gorilla/handlers"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/ee/query-service/app/api"
"github.com/SigNoz/signoz/ee/query-service/rules"
"github.com/SigNoz/signoz/ee/query-service/usage"
@@ -36,8 +31,8 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/web"
"log/slog"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
@@ -52,6 +47,7 @@ import (
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"log/slog"
)
// Server runs HTTP, Mux and a grpc server
@@ -211,7 +207,6 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
r := baseapp.NewRouter()
am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger(), s.signoz.Modules.OrgGetter, s.signoz.Authz)
r.Use(middleware.NewRecovery(s.signoz.Instrumentation.Logger()).Wrap)
r.Use(otelmux.Middleware(
"apiserver",
otelmux.WithMeterProvider(s.signoz.Instrumentation.MeterProvider()),
@@ -220,6 +215,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
otelmux.WithFilter(func(r *http.Request) bool {
return !slices.Contains([]string{"/api/v1/health"}, r.URL.Path)
}),
otelmux.WithPublicEndpoint(),
))
r.Use(middleware.NewIdentN(s.signoz.IdentNResolver, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
@@ -308,7 +304,7 @@ func (s *Server) Start(ctx context.Context) error {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
slog.Error("Could not start HTTP server", errors.Attr(err))
slog.Error("Could not start HTTP server", "error", err)
}
s.unavailableChannel <- healthcheck.Unavailable
}()
@@ -318,7 +314,7 @@ func (s *Server) Start(ctx context.Context) error {
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
if err != nil {
slog.Error("Could not start pprof server", errors.Attr(err))
slog.Error("Could not start pprof server", "error", err)
}
}()
@@ -326,7 +322,7 @@ func (s *Server) Start(ctx context.Context) error {
slog.Info("Starting OpAmp Websocket server", "addr", baseconst.OpAmpWsEndpoint)
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
if err != nil {
slog.Error("opamp ws server failed to start", errors.Attr(err))
slog.Error("opamp ws server failed to start", "error", err)
s.unavailableChannel <- healthcheck.Unavailable
}
}()

View File

@@ -12,7 +12,6 @@ import (
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/transition"
@@ -309,7 +308,7 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
// In case of error we log the error and continue with the original series
if filterErr != nil {
r.logger.ErrorContext(ctx, "Error filtering new series, ", errors.Attr(filterErr), "rule_name", r.Name())
r.logger.ErrorContext(ctx, "Error filtering new series, ", "error", filterErr, "rule_name", r.Name())
} else {
seriesToProcess = filteredSeries
}
@@ -392,7 +391,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (int, error) {
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
r.logger.ErrorContext(ctx, "Expanding alert template failed", errors.Attr(err), "data", tmplData, "rule_name", r.Name())
r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData, "rule_name", r.Name())
}
return result
}
@@ -468,7 +467,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (int, error) {
for fp, a := range r.Active {
labelsJSON, err := json.Marshal(a.QueryResultLables)
if err != nil {
r.logger.ErrorContext(ctx, "error marshaling labels", errors.Attr(err), "labels", a.Labels)
r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
}
if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given

View File

@@ -6,16 +6,14 @@ import (
"time"
"log/slog"
"github.com/google/uuid"
"github.com/SigNoz/signoz/pkg/errors"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"log/slog"
)
func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
@@ -153,7 +151,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
)
if err != nil {
slog.Error("failed to prepare a new threshold rule for test", "name", alertname, errors.Attr(err))
slog.Error("failed to prepare a new threshold rule for test", "name", alertname, "error", err)
return 0, basemodel.BadRequest(err)
}
@@ -175,7 +173,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
)
if err != nil {
slog.Error("failed to prepare a new promql rule for test", "name", alertname, errors.Attr(err))
slog.Error("failed to prepare a new promql rule for test", "name", alertname, "error", err)
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == ruletypes.RuleTypeAnomaly {
@@ -195,7 +193,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
)
if err != nil {
slog.Error("failed to prepare a new anomaly rule for test", "name", alertname, errors.Attr(err))
slog.Error("failed to prepare a new anomaly rule for test", "name", alertname, "error", err)
return 0, basemodel.BadRequest(err)
}
} else {
@@ -207,7 +205,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
alertsFound, err := rule.Eval(ctx, ts)
if err != nil {
slog.Error("evaluating rule failed", "rule", rule.Name(), errors.Attr(err))
slog.Error("evaluating rule failed", "rule", rule.Name(), "error", err)
return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
}
rule.SendAlerts(ctx, ts, 0, time.Minute, opts.NotifyFunc)

View File

@@ -15,7 +15,6 @@ import (
"github.com/google/uuid"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/query-service/utils/encryption"
@@ -77,14 +76,14 @@ func (lm *Manager) Start(ctx context.Context) error {
func (lm *Manager) UploadUsage(ctx context.Context) {
organizations, err := lm.orgGetter.ListByOwnedKeyRange(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to get organizations", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get organizations", "error", err)
return
}
for _, organization := range organizations {
// check if license is present or not
license, err := lm.licenseService.GetActive(ctx, organization.ID)
if err != nil {
slog.ErrorContext(ctx, "failed to get active license", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get active license", "error", err)
return
}
if license == nil {
@@ -116,7 +115,7 @@ func (lm *Manager) UploadUsage(ctx context.Context) {
dbusages := []model.UsageDB{}
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
slog.ErrorContext(ctx, "failed to get usage from clickhouse", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get usage from clickhouse", "error", err)
return
}
for _, u := range dbusages {
@@ -136,14 +135,14 @@ func (lm *Manager) UploadUsage(ctx context.Context) {
for _, usage := range usages {
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
if err != nil {
slog.ErrorContext(ctx, "error while decrypting usage data", errors.Attr(err))
slog.ErrorContext(ctx, "error while decrypting usage data", "error", err)
return
}
usageData := model.Usage{}
err = json.Unmarshal(usageDataBytes, &usageData)
if err != nil {
slog.ErrorContext(ctx, "error while unmarshalling usage data", errors.Attr(err))
slog.ErrorContext(ctx, "error while unmarshalling usage data", "error", err)
return
}
@@ -164,13 +163,13 @@ func (lm *Manager) UploadUsage(ctx context.Context) {
body, errv2 := json.Marshal(payload)
if errv2 != nil {
slog.ErrorContext(ctx, "error while marshalling usage payload", errors.Attr(errv2))
slog.ErrorContext(ctx, "error while marshalling usage payload", "error", errv2)
return
}
errv2 = lm.zeus.PutMeters(ctx, payload.LicenseKey.String(), body)
if errv2 != nil {
slog.ErrorContext(ctx, "failed to upload usage", errors.Attr(errv2))
slog.ErrorContext(ctx, "failed to upload usage", "error", errv2)
// not returning error here since it is captured in the failed count
return
}

View File

@@ -4,12 +4,11 @@ import (
"context"
"database/sql"
"github.com/uptrace/bun"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
)
type provider struct {
@@ -114,7 +113,7 @@ WHERE
defer func() {
if err := constraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
@@ -175,7 +174,7 @@ WHERE
defer func() {
if err := foreignKeyConstraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
@@ -244,7 +243,7 @@ ORDER BY index_name, column_position`, string(name))
defer func() {
if err := rows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()

View File

@@ -87,14 +87,14 @@ func (batcher *Batcher) Add(ctx context.Context, alerts ...*alertmanagertypes.Po
// batch could be.
if d := len(alerts) - batcher.config.Capacity; d > 0 {
alerts = alerts[d:]
batcher.logger.WarnContext(ctx, "alert batch larger than queue capacity, dropping alerts", slog.Int("num_dropped", d), slog.Int("capacity", batcher.config.Capacity))
batcher.logger.WarnContext(ctx, "alert batch larger than queue capacity, dropping alerts", "num_dropped", d, "capacity", batcher.config.Capacity)
}
// If the queue is full, remove the oldest alerts in favor
// of newer ones.
if d := (len(batcher.queue) + len(alerts)) - batcher.config.Capacity; d > 0 {
batcher.queue = batcher.queue[d:]
batcher.logger.WarnContext(ctx, "alert batch queue full, dropping alerts", slog.Int("num_dropped", d))
batcher.logger.WarnContext(ctx, "alert batch queue full, dropping alerts", "num_dropped", d)
}
batcher.queue = append(batcher.queue, alerts...)

View File

@@ -112,7 +112,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
return false, err
}
n.logger.DebugContext(ctx, "extracted group key", slog.String("key", string(key)))
n.logger.DebugContext(ctx, "extracted group key", "key", key)
data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
tmpl := notify.TmplText(n.tmpl, data, &err)

View File

@@ -21,7 +21,7 @@ func NewReceiverIntegrations(nc alertmanagertypes.Receiver, tmpl *template.Templ
errs types.MultiError
integrations []notify.Integration
add = func(name string, i int, rs notify.ResolvedSender, f func(l *slog.Logger) (notify.Notifier, error)) {
n, err := f(logger.With(slog.String("integration", name)))
n, err := f(logger.With("integration", name))
if err != nil {
errs.Add(err)
return

View File

@@ -74,7 +74,7 @@ func NewDispatcher(
route: r,
marker: mk,
timeout: to,
logger: l.With(slog.String("component", "signoz-dispatcher")),
logger: l.With("component", "signoz-dispatcher"),
metrics: m,
limits: lim,
notificationManager: n,
@@ -111,24 +111,24 @@ func (d *Dispatcher) run(it provider.AlertIterator) {
if !ok || alertWrapper == nil {
// Iterator exhausted for some reason.
if err := it.Err(); err != nil {
d.logger.ErrorContext(d.ctx, "Error on alert update", errors.Attr(err))
d.logger.ErrorContext(d.ctx, "Error on alert update", "err", err)
}
return
}
alert := alertWrapper.Data
d.logger.DebugContext(d.ctx, "SigNoz Custom Dispatcher: Received alert", slog.Any("alert", alert))
d.logger.DebugContext(d.ctx, "SigNoz Custom Dispatcher: Received alert", "alert", alert)
// Log errors but keep trying.
if err := it.Err(); err != nil {
d.logger.ErrorContext(d.ctx, "Error on alert update", errors.Attr(err))
d.logger.ErrorContext(d.ctx, "Error on alert update", "err", err)
continue
}
now := time.Now()
channels, err := d.notificationManager.Match(d.ctx, d.orgID, getRuleIDFromAlert(alert), alert.Labels)
if err != nil {
d.logger.ErrorContext(d.ctx, "Error on alert match", errors.Attr(err))
d.logger.ErrorContext(d.ctx, "Error on alert match", "err", err)
continue
}
for _, channel := range channels {
@@ -278,7 +278,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *dispatch.Route) {
ruleId := getRuleIDFromAlert(alert)
config, err := d.notificationManager.GetNotificationConfig(d.orgID, ruleId)
if err != nil {
d.logger.ErrorContext(d.ctx, "error getting alert notification config", slog.String("rule_id", ruleId), errors.Attr(err))
d.logger.ErrorContext(d.ctx, "error getting alert notification config", "rule_id", ruleId, "error", err)
return
}
renotifyInterval := config.Renotify.RenotifyInterval
@@ -310,7 +310,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *dispatch.Route) {
// If the group does not exist, create it. But check the limit first.
if limit := d.limits.MaxNumberOfAggregationGroups(); limit > 0 && d.aggrGroupsNum >= limit {
d.metrics.aggrGroupLimitReached.Inc()
d.logger.ErrorContext(d.ctx, "Too many aggregation groups, cannot create new group for alert", slog.Int("groups", d.aggrGroupsNum), slog.Int("limit", limit), slog.String("alert", alert.Name()))
d.logger.ErrorContext(d.ctx, "Too many aggregation groups, cannot create new group for alert", "groups", d.aggrGroupsNum, "limit", limit, "alert", alert.Name())
return
}
@@ -328,7 +328,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *dispatch.Route) {
go ag.run(func(ctx context.Context, alerts ...*types.Alert) bool {
_, _, err := d.stage.Exec(ctx, d.logger, alerts...)
if err != nil {
logger := d.logger.With(slog.Int("num_alerts", len(alerts)), errors.Attr(err))
logger := d.logger.With("num_alerts", len(alerts), "err", err)
if errors.Is(ctx.Err(), context.Canceled) {
// It is expected for the context to be canceled on
// configuration reload or shutdown. In this case, the
@@ -382,7 +382,7 @@ func newAggrGroup(ctx context.Context, labels model.LabelSet, r *dispatch.Route,
}
ag.ctx, ag.cancel = context.WithCancel(ctx)
ag.logger = logger.With(slog.Any("aggr_group", ag))
ag.logger = logger.With("aggr_group", ag)
// Set an initial one-time wait before flushing
// the first batch of notifications.
@@ -457,7 +457,7 @@ func (ag *aggrGroup) stop() {
// insert inserts the alert into the aggregation group.
func (ag *aggrGroup) insert(alert *types.Alert) {
if err := ag.alerts.Set(alert); err != nil {
ag.logger.ErrorContext(ag.ctx, "error on set alert", errors.Attr(err))
ag.logger.ErrorContext(ag.ctx, "error on set alert", "err", err)
}
// Immediately trigger a flush if the wait duration for this
@@ -497,7 +497,7 @@ func (ag *aggrGroup) flush(notify func(...*types.Alert) bool) {
}
sort.Stable(alertsSlice)
ag.logger.DebugContext(ag.ctx, "flushing", slog.String("alerts", fmt.Sprintf("%v", alertsSlice)))
ag.logger.DebugContext(ag.ctx, "flushing", "alerts", fmt.Sprintf("%v", alertsSlice))
if notify(alertsSlice...) {
// Delete all resolved alerts as we just sent a notification for them,
@@ -505,7 +505,7 @@ func (ag *aggrGroup) flush(notify func(...*types.Alert) bool) {
// that each resolved alert has not fired again during the flush as then
// we would delete an active alert thinking it was resolved.
if err := ag.alerts.DeleteIfNotModified(resolvedSlice); err != nil {
ag.logger.ErrorContext(ag.ctx, "error on delete alerts", errors.Attr(err))
ag.logger.ErrorContext(ag.ctx, "error on delete alerts", "err", err)
}
}
}

View File

@@ -10,6 +10,10 @@ import (
"github.com/prometheus/alertmanager/types"
"golang.org/x/sync/errgroup"
"github.com/SigNoz/signoz/pkg/alertmanager/alertmanagernotify"
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
"github.com/prometheus/alertmanager/dispatch"
"github.com/prometheus/alertmanager/featurecontrol"
"github.com/prometheus/alertmanager/inhibit"
@@ -21,11 +25,6 @@ import (
"github.com/prometheus/alertmanager/timeinterval"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/SigNoz/signoz/pkg/alertmanager/alertmanagernotify"
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
)
var (
@@ -73,7 +72,7 @@ type Server struct {
func New(ctx context.Context, logger *slog.Logger, registry prometheus.Registerer, srvConfig Config, orgID string, stateStore alertmanagertypes.StateStore, nfManager nfmanager.NotificationManager) (*Server, error) {
server := &Server{
logger: logger.With(slog.String("pkg", "go.signoz.io/pkg/alertmanager/alertmanagerserver")),
logger: logger.With("pkg", "go.signoz.io/pkg/alertmanager/alertmanagerserver"),
registry: registry,
srvConfig: srvConfig,
orgID: orgID,
@@ -140,7 +139,7 @@ func New(ctx context.Context, logger *slog.Logger, registry prometheus.Registere
server.silences.Maintenance(server.srvConfig.Silences.MaintenanceInterval, snapfnoop, server.stopc, func() (int64, error) {
// Delete silences older than the retention period.
if _, err := server.silences.GC(); err != nil {
server.logger.ErrorContext(ctx, "silence garbage collection", errors.Attr(err))
server.logger.ErrorContext(ctx, "silence garbage collection", "error", err)
// Don't return here - we need to snapshot our state first.
}
@@ -169,7 +168,7 @@ func New(ctx context.Context, logger *slog.Logger, registry prometheus.Registere
defer server.wg.Done()
server.nflog.Maintenance(server.srvConfig.NFLog.MaintenanceInterval, snapfnoop, server.stopc, func() (int64, error) {
if _, err := server.nflog.GC(); err != nil {
server.logger.ErrorContext(ctx, "notification log garbage collection", errors.Attr(err))
server.logger.ErrorContext(ctx, "notification log garbage collection", "error", err)
// Don't return without saving the current state.
}
@@ -247,7 +246,7 @@ func (server *Server) SetConfig(ctx context.Context, alertmanagerConfig *alertma
for _, rcv := range config.Receivers {
if _, found := activeReceivers[rcv.Name]; !found {
// No need to build a receiver if no route is using it.
server.logger.InfoContext(ctx, "skipping creation of receiver not referenced by any route", slog.String("receiver", rcv.Name))
server.logger.InfoContext(ctx, "skipping creation of receiver not referenced by any route", "receiver", rcv.Name)
continue
}
integrations, err := alertmanagernotify.NewReceiverIntegrations(rcv, server.tmpl, server.logger)

View File

@@ -2,7 +2,6 @@ package rulebasednotification
import (
"context"
"log/slog"
"strings"
"sync"
@@ -267,7 +266,7 @@ func (r *provider) convertLabelSetToEnv(ctx context.Context, labelSet model.Labe
}
if logForReview {
r.settings.Logger().InfoContext(ctx, "found label set with conflicting prefix dotted keys", slog.Any("labels", labelSet))
r.settings.Logger().InfoContext(ctx, "found label set with conflicting prefix dotted keys", "labels", labelSet)
}
return env

View File

@@ -2,7 +2,6 @@ package alertmanager
import (
"context"
"log/slog"
"sync"
"github.com/prometheus/alertmanager/featurecontrol"
@@ -75,7 +74,7 @@ func (service *Service) SyncServers(ctx context.Context) error {
for _, org := range orgs {
config, _, err := service.getConfig(ctx, org.ID.StringValue())
if err != nil {
service.settings.Logger().ErrorContext(ctx, "failed to get alertmanager config for org", slog.String("org_id", org.ID.StringValue()), errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to get alertmanager config for org", "org_id", org.ID.StringValue(), "error", err)
continue
}
@@ -83,7 +82,7 @@ func (service *Service) SyncServers(ctx context.Context) error {
if _, ok := service.servers[org.ID.StringValue()]; !ok {
server, err := service.newServer(ctx, org.ID.StringValue())
if err != nil {
service.settings.Logger().ErrorContext(ctx, "failed to create alertmanager server", slog.String("org_id", org.ID.StringValue()), errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to create alertmanager server", "org_id", org.ID.StringValue(), "error", err)
continue
}
@@ -91,13 +90,13 @@ func (service *Service) SyncServers(ctx context.Context) error {
}
if service.servers[org.ID.StringValue()].Hash() == config.StoreableConfig().Hash {
service.settings.Logger().DebugContext(ctx, "skipping alertmanager sync for org", slog.String("org_id", org.ID.StringValue()), slog.String("hash", config.StoreableConfig().Hash))
service.settings.Logger().DebugContext(ctx, "skipping alertmanager sync for org", "org_id", org.ID.StringValue(), "hash", config.StoreableConfig().Hash)
continue
}
err = service.servers[org.ID.StringValue()].SetConfig(ctx, config)
if err != nil {
service.settings.Logger().ErrorContext(ctx, "failed to set config for alertmanager server", slog.String("org_id", org.ID.StringValue()), errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to set config for alertmanager server", "org_id", org.ID.StringValue(), "error", err)
continue
}
}
@@ -164,7 +163,7 @@ func (service *Service) Stop(ctx context.Context) error {
for _, server := range service.servers {
if err := server.Stop(ctx); err != nil {
errs = append(errs, err)
service.settings.Logger().ErrorContext(ctx, "failed to stop alertmanager server", errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to stop alertmanager server", "error", err)
}
}
@@ -192,7 +191,7 @@ func (service *Service) newServer(ctx context.Context, orgID string) (*alertmana
// defaults from an upstream upgrade or something similar) trigger a DB update
// so that other code paths reading directly from the store see the up-to-date config.
if storedHash == config.StoreableConfig().Hash {
service.settings.Logger().DebugContext(ctx, "skipping config store update for org", slog.String("org_id", orgID), slog.String("hash", config.StoreableConfig().Hash))
service.settings.Logger().DebugContext(ctx, "skipping config store update for org", "org_id", orgID, "hash", config.StoreableConfig().Hash)
return server, nil
}

View File

@@ -4,9 +4,8 @@ import (
"context"
"time"
"github.com/prometheus/common/model"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/prometheus/common/model"
amConfig "github.com/prometheus/alertmanager/config"
@@ -67,7 +66,7 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
func (provider *provider) Start(ctx context.Context) error {
if err := provider.service.SyncServers(ctx); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", "error", err)
return err
}
@@ -79,7 +78,7 @@ func (provider *provider) Start(ctx context.Context) error {
return nil
case <-ticker.C:
if err := provider.service.SyncServers(ctx); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", "error", err)
}
}
}

View File

@@ -2,10 +2,8 @@ package segmentanalytics
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/analyticstypes"
segment "github.com/segmentio/analytics-go/v3"
@@ -47,14 +45,14 @@ func (provider *provider) Send(ctx context.Context, messages ...analyticstypes.M
for _, message := range messages {
err := provider.client.Enqueue(message)
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
}
func (provider *provider) TrackGroup(ctx context.Context, group, event string, properties map[string]any) {
if properties == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", slog.String("group", group), slog.String("event", event))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", "group", group, "event", event)
return
}
@@ -69,13 +67,13 @@ func (provider *provider) TrackGroup(ctx context.Context, group, event string, p
},
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) TrackUser(ctx context.Context, group, user, event string, properties map[string]any) {
if properties == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", slog.String("user", user), slog.String("group", group), slog.String("event", event))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", "user", user, "group", group, "event", event)
return
}
@@ -90,13 +88,13 @@ func (provider *provider) TrackUser(ctx context.Context, group, user, event stri
},
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) IdentifyGroup(ctx context.Context, group string, traits map[string]any) {
if traits == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", slog.String("group", group))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", "group", group)
return
}
@@ -106,7 +104,7 @@ func (provider *provider) IdentifyGroup(ctx context.Context, group string, trait
Traits: analyticstypes.NewTraitsFromMap(traits),
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
// identify the group using the stats user
@@ -116,13 +114,13 @@ func (provider *provider) IdentifyGroup(ctx context.Context, group string, trait
Traits: analyticstypes.NewTraitsFromMap(traits),
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) IdentifyUser(ctx context.Context, group, user string, traits map[string]any) {
if traits == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", slog.String("user", user), slog.String("group", group))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", "user", user, "group", group)
return
}
@@ -132,7 +130,7 @@ func (provider *provider) IdentifyUser(ctx context.Context, group, user string,
Traits: analyticstypes.NewTraitsFromMap(traits),
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
// associate the user with the group
@@ -142,13 +140,13 @@ func (provider *provider) IdentifyUser(ctx context.Context, group, user string,
Traits: analyticstypes.NewTraits().Set("id", group), // A trait is required
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) Stop(ctx context.Context) error {
if err := provider.client.Close(); err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to close segment client", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to close segment client", "err", err)
}
close(provider.stopC)

View File

@@ -2,21 +2,19 @@ package googlecallbackauthn
import (
"context"
"log/slog"
"net/url"
"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
admin "google.golang.org/api/admin/directory/v1"
"google.golang.org/api/option"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/http/client"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
admin "google.golang.org/api/admin/directory/v1"
"google.golang.org/api/option"
)
const (
@@ -74,13 +72,13 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
}
if err := query.Get("error"); err != "" {
a.settings.Logger().ErrorContext(ctx, "google: error while authenticating", slog.String("error", err), slog.String("error_description", query.Get("error_description")))
a.settings.Logger().ErrorContext(ctx, "google: error while authenticating", "error", err, "error_description", query.Get("error_description"))
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "google: error while authenticating").WithAdditional(query.Get("error_description"))
}
state, err := authtypes.NewStateFromString(query.Get("state"))
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: invalid state", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: invalid state", "error", err)
return nil, errors.Newf(errors.TypeInvalidInput, authtypes.ErrCodeInvalidState, "google: invalid state").WithAdditional(err.Error())
}
@@ -94,11 +92,11 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
if err != nil {
var retrieveError *oauth2.RetrieveError
if errors.As(err, &retrieveError) {
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", errors.Attr(err), slog.String("error_description", retrieveError.ErrorDescription), slog.String("body", string(retrieveError.Body)))
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", "error", err, "error_description", retrieveError.ErrorDescription, "body", string(retrieveError.Body))
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: failed to get token").WithAdditional(retrieveError.ErrorDescription)
}
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "google: failed to get token")
}
@@ -110,7 +108,7 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
verifier := oidcProvider.Verifier(&oidc.Config{ClientID: authDomain.AuthDomainConfig().Google.ClientID})
idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: failed to verify token", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: failed to verify token", "error", err)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: failed to verify token")
}
@@ -122,18 +120,18 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
}
if err := idToken.Claims(&claims); err != nil {
a.settings.Logger().ErrorContext(ctx, "google: missing or invalid claims", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: missing or invalid claims", "error", err)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: missing or invalid claims").WithAdditional(err.Error())
}
if claims.HostedDomain != authDomain.StorableAuthDomain().Name {
a.settings.Logger().ErrorContext(ctx, "google: unexpected hd claim", slog.String("expected", authDomain.StorableAuthDomain().Name), slog.String("actual", claims.HostedDomain))
a.settings.Logger().ErrorContext(ctx, "google: unexpected hd claim", "expected", authDomain.StorableAuthDomain().Name, "actual", claims.HostedDomain)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: unexpected hd claim")
}
if !authDomain.AuthDomainConfig().Google.InsecureSkipEmailVerified {
if !claims.EmailVerified {
a.settings.Logger().ErrorContext(ctx, "google: email is not verified", slog.String("email", claims.Email))
a.settings.Logger().ErrorContext(ctx, "google: email is not verified", "email", claims.Email)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: email is not verified")
}
}
@@ -147,7 +145,7 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
if authDomain.AuthDomainConfig().Google.FetchGroups {
groups, err = a.fetchGoogleWorkspaceGroups(ctx, claims.Email, authDomain.AuthDomainConfig().Google)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: could not fetch groups", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: could not fetch groups", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "google: could not fetch groups").WithAdditional(err.Error())
}
@@ -191,7 +189,7 @@ func (a *AuthN) fetchGoogleWorkspaceGroups(ctx context.Context, userEmail string
jwtConfig, err := google.JWTConfigFromJSON([]byte(config.ServiceAccountJSON), admin.AdminDirectoryGroupReadonlyScope)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: invalid service account credentials", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: invalid service account credentials", "error", err)
return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid service account credentials")
}
@@ -201,7 +199,7 @@ func (a *AuthN) fetchGoogleWorkspaceGroups(ctx context.Context, userEmail string
adminService, err := admin.NewService(ctx, option.WithHTTPClient(jwtConfig.Client(customCtx)))
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: unable to create directory service", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: unable to create directory service", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "unable to create directory service")
}
@@ -223,7 +221,7 @@ func (a *AuthN) getGroups(ctx context.Context, adminService *admin.Service, user
groupList, err := call.Context(ctx).Do()
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: unable to list groups", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: unable to list groups", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "unable to list groups")
}
@@ -238,7 +236,7 @@ func (a *AuthN) getGroups(ctx context.Context, adminService *admin.Service, user
if fetchTransitive {
transitiveGroups, err := a.getGroups(ctx, adminService, group.Email, fetchTransitive, checkedGroups)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: unable to list transitive groups", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: unable to list transitive groups", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "unable to list transitive groups")
}
userGroups = append(userGroups, transitiveGroups...)

View File

@@ -2,19 +2,18 @@ package rediscache
import (
"context"
"fmt"
"log/slog"
"strings"
"time"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
"fmt"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/cachetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
)
type provider struct {
@@ -77,6 +76,6 @@ func (c *provider) DeleteMany(ctx context.Context, orgID valuer.UUID, cacheKeys
}
if err := c.client.Del(ctx, updatedCacheKeys...).Err(); err != nil {
c.settings.Logger().ErrorContext(ctx, "error deleting cache keys", slog.Any("cache_keys", cacheKeys), errors.Attr(err))
c.settings.Logger().ErrorContext(ctx, "error deleting cache keys", "cache_keys", cacheKeys, "error", err)
}
}

View File

@@ -2,7 +2,6 @@ package noopemailing
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/emailing"
"github.com/SigNoz/signoz/pkg/factory"
@@ -25,6 +24,6 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
}
func (provider *provider) SendHTML(ctx context.Context, to string, subject string, templateName emailtypes.TemplateName, data map[string]any) error {
provider.settings.Logger().WarnContext(ctx, "using noop provider, no email will be sent", slog.String("to", to), slog.String("subject", subject))
provider.settings.Logger().WarnContext(ctx, "using noop provider, no email will be sent", "to", to, "subject", subject)
return nil
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/SigNoz/signoz/pkg/emailing"
"github.com/SigNoz/signoz/pkg/emailing/templatestore/filetemplatestore"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/smtp/client"
"github.com/SigNoz/signoz/pkg/types/emailtypes"
@@ -29,7 +28,7 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
// Try to create a template store. If it fails, use an empty store.
store, err := filetemplatestore.NewStore(ctx, config.Templates.Directory, emailtypes.Templates, settings.Logger())
if err != nil {
settings.Logger().ErrorContext(ctx, "failed to create template store, using empty store", errors.Attr(err))
settings.Logger().ErrorContext(ctx, "failed to create template store, using empty store", "error", err)
store = filetemplatestore.NewEmptyStore()
}
@@ -88,7 +87,7 @@ func (provider *provider) SendHTML(ctx context.Context, to string, subject strin
content, err := emailtypes.NewContent(template, data)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to create email content", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to create email content", "error", err)
return err
}

View File

@@ -45,7 +45,7 @@ func NewStore(ctx context.Context, baseDir string, templates []emailtypes.Templa
t, err := parseTemplateFile(filepath.Join(baseDir, fi.Name()), templateName)
if err != nil {
logger.ErrorContext(ctx, "failed to parse template file", slog.Any("template", templateName), slog.String("path", filepath.Join(baseDir, fi.Name())), errors.Attr(err))
logger.ErrorContext(ctx, "failed to parse template file", "template", templateName, "path", filepath.Join(baseDir, fi.Name()), "error", err)
continue
}
@@ -54,7 +54,7 @@ func NewStore(ctx context.Context, baseDir string, templates []emailtypes.Templa
}
if err := checkMissingTemplates(templates, foundTemplates); err != nil {
logger.ErrorContext(ctx, "some templates are missing", errors.Attr(err))
logger.ErrorContext(ctx, "some templates are missing", "error", err)
}
return &store{fs: fs}, nil

View File

@@ -16,7 +16,6 @@ var (
CodeCanceled = Code{"canceled"}
CodeTimeout = Code{"timeout"}
CodeUnknown = Code{"unknown"}
CodeFatal = Code{"fatal"}
CodeLicenseUnavailable = Code{"license_unavailable"}
)

View File

@@ -7,7 +7,7 @@ import (
)
// base is the fundamental struct that implements the error interface.
// The order of the struct is 'TCMEUAS'.
// The order of the struct is 'TCMEUA'.
type base struct {
// t denotes the custom type of the error.
t typ
@@ -21,30 +21,16 @@ type base struct {
u string
// a denotes any additional error messages (if present).
a []string
// s contains the stacktrace captured at error creation time.
s fmt.Stringer
}
// Stacktrace returns the stacktrace captured at error creation time, formatted as a string.
func (b *base) Stacktrace() string {
if b.s == nil {
return ""
}
return b.s.String()
}
// WithStacktrace replaces the auto-captured stacktrace with a pre-formatted string
// and returns a new base error.
func (b *base) WithStacktrace(s string) *base {
return &base{
t: b.t,
c: b.c,
m: b.m,
e: b.e,
u: b.u,
a: b.a,
s: rawStacktrace(s),
}
func (b *base) LogValue() slog.Value {
return slog.GroupValue(
slog.String("type", b.t.s),
slog.String("code", b.c.s),
slog.String("message", b.m),
slog.String("url", b.u),
slog.Any("additional", b.a),
)
}
// base implements Error interface.
@@ -65,7 +51,6 @@ func New(t typ, code Code, message string) *base {
e: nil,
u: "",
a: []string{},
s: newStackTrace(),
}
}
@@ -76,7 +61,6 @@ func Newf(t typ, code Code, format string, args ...any) *base {
c: code,
m: fmt.Sprintf(format, args...),
e: nil,
s: newStackTrace(),
}
}
@@ -88,7 +72,6 @@ func Wrapf(cause error, t typ, code Code, format string, args ...any) *base {
c: code,
m: fmt.Sprintf(format, args...),
e: cause,
s: newStackTrace(),
}
}
@@ -99,17 +82,12 @@ func Wrap(cause error, t typ, code Code, message string) *base {
c: code,
m: message,
e: cause,
s: newStackTrace(),
}
}
// WithAdditionalf adds an additional error message to the existing error.
func WithAdditionalf(cause error, format string, args ...any) *base {
t, c, m, e, u, a := Unwrapb(cause)
var s fmt.Stringer
if original, ok := cause.(*base); ok {
s = original.s
}
b := &base{
t: t,
c: c,
@@ -117,7 +95,6 @@ func WithAdditionalf(cause error, format string, args ...any) *base {
e: e,
u: u,
a: a,
s: s,
}
return b.WithAdditional(append(a, fmt.Sprintf(format, args...))...)
@@ -132,7 +109,6 @@ func (b *base) WithUrl(u string) *base {
e: b.e,
u: u,
a: b.a,
s: b.s,
}
}
@@ -145,7 +121,6 @@ func (b *base) WithAdditional(a ...string) *base {
e: b.e,
u: b.u,
a: a,
s: b.s,
}
}
@@ -248,8 +223,3 @@ func WrapTimeoutf(cause error, code Code, format string, args ...any) *base {
func NewTimeoutf(code Code, format string, args ...any) *base {
return Newf(TypeTimeout, code, format, args...)
}
// Attr returns an slog.Attr with a standardized "exception" key for the given error.
func Attr(err error) slog.Attr {
return slog.Any("exception", err)
}

View File

@@ -51,22 +51,3 @@ func TestUnwrapb(t *testing.T) {
atyp, _, _, _, _, _ = Unwrapb(oerr)
assert.Equal(t, TypeInternal, atyp)
}
func TestAttr(t *testing.T) {
err := New(TypeInternal, MustNewCode("test_code"), "test error")
attr := Attr(err)
assert.Equal(t, "exception", attr.Key)
assert.Equal(t, err, attr.Value.Any())
}
func TestWithStacktrace(t *testing.T) {
err := New(TypeInternal, MustNewCode("test_code"), "panic").WithStacktrace("custom stack trace")
assert.Equal(t, "custom stack trace", err.Stacktrace())
assert.Equal(t, "panic", err.Error())
typ, code, message, _, _, _ := Unwrapb(err)
assert.Equal(t, TypeInternal, typ)
assert.Equal(t, "test_code", code.String())
assert.Equal(t, "panic", message)
}

View File

@@ -1,45 +0,0 @@
package errors
import (
"fmt"
"runtime"
"strings"
)
// stacktrace holds a snapshot of program counters.
type stacktrace []uintptr
// newStackTrace captures a stack trace, skipping 3 frames to record the
// snapshot at the origin of the error:
// 1. runtime.Callers
// 2. newStackTrace
// 3. the constructor (New, Newf, Wrapf, Wrap)
//
// Inspired by https://github.com/thanos-io/thanos/blob/main/pkg/errors/stacktrace.go
func newStackTrace() stacktrace {
const depth = 16
pc := make([]uintptr, depth)
n := runtime.Callers(3, pc)
return stacktrace(pc[:n:n])
}
// String formats the stacktrace as function/file/line pairs.
func (s stacktrace) String() string {
var buf strings.Builder
frames := runtime.CallersFrames(s)
for {
frame, more := frames.Next()
fmt.Fprintf(&buf, "%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
if !more {
break
}
}
return buf.String()
}
// rawStacktrace holds a pre-formatted stacktrace string.
type rawStacktrace string
func (r rawStacktrace) String() string {
return string(r)
}

View File

@@ -12,7 +12,6 @@ var (
TypeCanceled = typ{"canceled"}
TypeTimeout = typ{"timeout"}
TypeUnexpected = typ{"unexpected"} // Generic mismatch of expectations
TypeFatal = typ{"fatal"} // Unrecoverable failure (e.g. panic)
TypeLicenseUnavailable = typ{"license-unavailable"}
)

View File

@@ -37,7 +37,7 @@ func NewRegistry(logger *slog.Logger, services ...NamedService) (*Registry, erro
}
return &Registry{
logger: logger.With(slog.String("pkg", "go.signoz.io/pkg/factory")),
logger: logger.With("pkg", "go.signoz.io/pkg/factory"),
services: m,
startCh: make(chan error, 1),
stopCh: make(chan error, len(services)),
@@ -47,7 +47,7 @@ func NewRegistry(logger *slog.Logger, services ...NamedService) (*Registry, erro
func (r *Registry) Start(ctx context.Context) {
for _, s := range r.services.GetInOrder() {
go func(s NamedService) {
r.logger.InfoContext(ctx, "starting service", slog.Any("service", s.Name()))
r.logger.InfoContext(ctx, "starting service", "service", s.Name())
err := s.Start(ctx)
r.startCh <- err
}(s)
@@ -61,11 +61,11 @@ func (r *Registry) Wait(ctx context.Context) error {
select {
case <-ctx.Done():
r.logger.InfoContext(ctx, "caught context error, exiting", errors.Attr(ctx.Err()))
r.logger.InfoContext(ctx, "caught context error, exiting", "error", ctx.Err())
case s := <-interrupt:
r.logger.InfoContext(ctx, "caught interrupt signal, exiting", slog.Any("signal", s))
r.logger.InfoContext(ctx, "caught interrupt signal, exiting", "signal", s)
case err := <-r.startCh:
r.logger.ErrorContext(ctx, "caught service error, exiting", errors.Attr(err))
r.logger.ErrorContext(ctx, "caught service error, exiting", "error", err)
return err
}
@@ -75,7 +75,7 @@ func (r *Registry) Wait(ctx context.Context) error {
func (r *Registry) Stop(ctx context.Context) error {
for _, s := range r.services.GetInOrder() {
go func(s NamedService) {
r.logger.InfoContext(ctx, "stopping service", slog.Any("service", s.Name()))
r.logger.InfoContext(ctx, "stopping service", "service", s.Name())
err := s.Stop(ctx)
r.stopCh <- err
}(s)

View File

@@ -35,7 +35,7 @@ type scoped struct {
func NewScopedProviderSettings(settings ProviderSettings, pkgName string) *scoped {
return &scoped{
logger: settings.Logger.With(slog.String("logger", pkgName)),
logger: settings.Logger.With("logger", pkgName),
meter: settings.MeterProvider.Meter(pkgName),
tracer: settings.TracerProvider.Tracer(pkgName),
prometheusRegisterer: settings.PrometheusRegisterer,

View File

@@ -2,7 +2,6 @@ package configflagger
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
@@ -35,7 +34,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "boolean"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "boolean")
continue
}
return nil, err
@@ -53,7 +52,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "string"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "string")
continue
}
return nil, err
@@ -71,7 +70,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "float"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "float")
continue
}
return nil, err
@@ -89,7 +88,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "integer"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "integer")
continue
}
return nil, err
@@ -107,7 +106,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "object"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "object")
continue
}
return nil, err

View File

@@ -2,13 +2,10 @@ package flagger
import (
"context"
"log/slog"
"github.com/open-feature/go-sdk/openfeature"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/open-feature/go-sdk/openfeature"
)
// Any feature flag provider has to implement this interface.
@@ -98,7 +95,7 @@ func (f *flagger) Boolean(ctx context.Context, flag featuretypes.Name, evalCtx f
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return false, err
}
@@ -106,7 +103,7 @@ func (f *flagger) Boolean(ctx context.Context, flag featuretypes.Name, evalCtx f
defaultValue, _, err := featuretypes.VariantValue[bool](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return false, err
}
@@ -115,7 +112,7 @@ func (f *flagger) Boolean(ctx context.Context, flag featuretypes.Name, evalCtx f
for _, client := range f.clients {
value, err := client.BooleanValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().ErrorContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -131,7 +128,7 @@ func (f *flagger) String(ctx context.Context, flag featuretypes.Name, evalCtx fe
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return "", err
}
@@ -139,7 +136,7 @@ func (f *flagger) String(ctx context.Context, flag featuretypes.Name, evalCtx fe
defaultValue, _, err := featuretypes.VariantValue[string](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return "", err
}
@@ -148,7 +145,7 @@ func (f *flagger) String(ctx context.Context, flag featuretypes.Name, evalCtx fe
for _, client := range f.clients {
value, err := client.StringValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -164,7 +161,7 @@ func (f *flagger) Float(ctx context.Context, flag featuretypes.Name, evalCtx fea
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return 0, err
}
@@ -172,7 +169,7 @@ func (f *flagger) Float(ctx context.Context, flag featuretypes.Name, evalCtx fea
defaultValue, _, err := featuretypes.VariantValue[float64](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return 0, err
}
@@ -181,7 +178,7 @@ func (f *flagger) Float(ctx context.Context, flag featuretypes.Name, evalCtx fea
for _, client := range f.clients {
value, err := client.FloatValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -197,7 +194,7 @@ func (f *flagger) Int(ctx context.Context, flag featuretypes.Name, evalCtx featu
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return 0, err
}
@@ -205,7 +202,7 @@ func (f *flagger) Int(ctx context.Context, flag featuretypes.Name, evalCtx featu
defaultValue, _, err := featuretypes.VariantValue[int64](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return 0, err
}
@@ -214,7 +211,7 @@ func (f *flagger) Int(ctx context.Context, flag featuretypes.Name, evalCtx featu
for _, client := range f.clients {
value, err := client.IntValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -230,7 +227,7 @@ func (f *flagger) Object(ctx context.Context, flag featuretypes.Name, evalCtx fe
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return nil, err
}
@@ -238,7 +235,7 @@ func (f *flagger) Object(ctx context.Context, flag featuretypes.Name, evalCtx fe
defaultValue, _, err := featuretypes.VariantValue[any](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return nil, err
}
@@ -247,7 +244,7 @@ func (f *flagger) Object(ctx context.Context, flag featuretypes.Name, evalCtx fe
for _, client := range f.clients {
value, err := client.ObjectValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -265,7 +262,7 @@ func (f *flagger) BooleanOrEmpty(ctx context.Context, flag featuretypes.Name, ev
defaultValue := false
value, err := f.Boolean(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -275,7 +272,7 @@ func (f *flagger) StringOrEmpty(ctx context.Context, flag featuretypes.Name, eva
defaultValue := ""
value, err := f.String(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -285,7 +282,7 @@ func (f *flagger) FloatOrEmpty(ctx context.Context, flag featuretypes.Name, eval
defaultValue := 0.0
value, err := f.Float(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -295,7 +292,7 @@ func (f *flagger) IntOrEmpty(ctx context.Context, flag featuretypes.Name, evalCt
defaultValue := int64(0)
value, err := f.Int(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -305,7 +302,7 @@ func (f *flagger) ObjectOrEmpty(ctx context.Context, flag featuretypes.Name, eva
defaultValue := struct{}{}
value, err := f.Object(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -339,7 +336,7 @@ func (f *flagger) List(ctx context.Context, evalCtx featuretypes.FlaggerEvaluati
for _, provider := range f.providers {
pFeatures, err := provider.List(ctx)
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get features from provider", errors.Attr(err), slog.String("provider", provider.Metadata().Name))
f.settings.Logger().WarnContext(ctx, "failed to get features from provider", "error", err, "provider", provider.Metadata().Name)
continue
}

View File

@@ -9,8 +9,6 @@ import (
"github.com/gojek/heimdall/v7"
semconv "go.opentelemetry.io/otel/semconv/v1.27.0"
"github.com/SigNoz/signoz/pkg/errors"
)
type reqResLog struct {
@@ -53,7 +51,7 @@ func (plugin *reqResLog) OnRequestEnd(request *http.Request, response *http.Resp
bodybytes, err := io.ReadAll(response.Body)
if err != nil {
plugin.logger.DebugContext(request.Context(), "::UNABLE-TO-LOG-RESPONSE-BODY::", errors.Attr(err))
plugin.logger.DebugContext(request.Context(), "::UNABLE-TO-LOG-RESPONSE-BODY::", "error", err)
} else {
_ = response.Body.Close()
response.Body = io.NopCloser(bytes.NewBuffer(bodybytes))
@@ -71,7 +69,7 @@ func (plugin *reqResLog) OnRequestEnd(request *http.Request, response *http.Resp
func (plugin *reqResLog) OnError(request *http.Request, err error) {
host, port, _ := net.SplitHostPort(request.Host)
fields := []any{
errors.Attr(err),
"error", err,
string(semconv.HTTPRequestMethodKey), request.Method,
string(semconv.URLPathKey), request.URL.Path,
string(semconv.URLSchemeKey), request.URL.Scheme,

View File

@@ -42,7 +42,7 @@ func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
if claims.IdentNProvider == authtypes.IdentNProviderAPIKey.StringValue() {
if err := claims.IsViewer(); err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
@@ -67,7 +67,7 @@ func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
selectors,
)
if err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
if errors.Asc(err, authtypes.ErrCodeAuthZForbidden) {
render.Error(rw, errors.New(errors.TypeForbidden, authtypes.ErrCodeAuthZForbidden, "only viewers/editors/admins can access this resource"))
return
@@ -92,7 +92,7 @@ func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
if claims.IdentNProvider == authtypes.IdentNProviderAPIKey.StringValue() {
if err := claims.IsEditor(); err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
@@ -116,7 +116,7 @@ func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
selectors,
)
if err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
if errors.Asc(err, authtypes.ErrCodeAuthZForbidden) {
render.Error(rw, errors.New(errors.TypeForbidden, authtypes.ErrCodeAuthZForbidden, "only editors/admins can access this resource"))
return
@@ -141,7 +141,7 @@ func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
if claims.IdentNProvider == authtypes.IdentNProviderAPIKey.StringValue() {
if err := claims.IsAdmin(); err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
@@ -164,7 +164,7 @@ func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
selectors,
)
if err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
if errors.Asc(err, authtypes.ErrCodeAuthZForbidden) {
render.Error(rw, errors.New(errors.TypeForbidden, authtypes.ErrCodeAuthZForbidden, "only admins can access this resource"))
return
@@ -188,7 +188,7 @@ func (middleware *AuthZ) SelfAccess(next http.HandlerFunc) http.HandlerFunc {
id := mux.Vars(req)["id"]
if err := claims.IsSelfAccess(id); err != nil {
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}

View File

@@ -5,7 +5,6 @@ import (
"log/slog"
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/sharder"
"github.com/SigNoz/signoz/pkg/types"
@@ -53,7 +52,7 @@ func (m *IdentN) Wrap(next http.Handler) http.Handler {
ctx := r.Context()
claims := identity.ToClaims()
if err := m.sharder.IsMyOwnedKey(ctx, types.NewOrganizationKey(valuer.MustNewUUID(claims.OrgID))); err != nil {
m.logger.ErrorContext(ctx, identityCrossOrgMessage, slog.Any("claims", claims), errors.Attr(err))
m.logger.ErrorContext(ctx, identityCrossOrgMessage, "claims", claims, "error", err)
next.ServeHTTP(w, r)
return
}

View File

@@ -9,8 +9,6 @@ import (
"github.com/gorilla/mux"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"github.com/SigNoz/signoz/pkg/errors"
)
const (
@@ -29,7 +27,7 @@ func NewLogging(logger *slog.Logger, excludedRoutes []string) *Logging {
}
return &Logging{
logger: logger.With(slog.String("pkg", pkgname)),
logger: logger.With("pkg", pkgname),
excludedRoutes: excludedRoutesMap,
}
}
@@ -67,7 +65,7 @@ func (middleware *Logging) Wrap(next http.Handler) http.Handler {
string(semconv.HTTPServerRequestDurationName), time.Since(start),
)
if err != nil {
fields = append(fields, errors.Attr(err))
fields = append(fields, "error", err)
middleware.logger.ErrorContext(req.Context(), logMessage, fields...)
} else {
// when the status code is 400 or >=500, and the response body is not empty.

View File

@@ -1,38 +0,0 @@
package middleware
import (
"fmt"
"log/slog"
"net/http"
"runtime"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
)
type Recovery struct {
logger *slog.Logger
}
func NewRecovery(logger *slog.Logger) *Recovery {
return &Recovery{
logger: logger.With(slog.String("pkg", pkgname)),
}
}
func (middleware *Recovery) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
defer func() {
if r := recover(); r != nil {
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
err := errors.New(errors.TypeFatal, errors.CodeFatal, fmt.Sprint(r)).WithStacktrace(string(buf[:n]))
middleware.logger.ErrorContext(req.Context(), "panic recovered", errors.Attr(err))
render.Error(rw, errors.Wrap(err, errors.TypeFatal, errors.CodeFatal, "An unexpected error occurred on our end. Please try again."))
}
}()
next.ServeHTTP(rw, req)
})
}

View File

@@ -1,164 +0,0 @@
package middleware
import (
"context"
"encoding/json"
"log/slog"
"net/http"
"net/http/httptest"
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRecovery(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
handler http.HandlerFunc
wantStatus int
wantLog bool
wantMessage string
wantErrorStatus bool
}{
{
name: "PanicWithString",
handler: func(w http.ResponseWriter, r *http.Request) {
panic("something went wrong")
},
wantStatus: http.StatusInternalServerError,
wantLog: true,
wantMessage: "something went wrong",
wantErrorStatus: true,
},
{
name: "PanicWithError",
handler: func(w http.ResponseWriter, r *http.Request) {
panic(errors.New(errors.TypeInternal, errors.CodeInternal, "db connection failed"))
},
wantStatus: http.StatusInternalServerError,
wantLog: true,
wantMessage: "db connection failed",
wantErrorStatus: true,
},
{
name: "PanicWithInteger",
handler: func(w http.ResponseWriter, r *http.Request) {
panic(42)
},
wantStatus: http.StatusInternalServerError,
wantLog: true,
wantMessage: "42",
wantErrorStatus: true,
},
{
name: "PanicWithDivisionByZero",
handler: func(w http.ResponseWriter, r *http.Request) {
divisor := 0
_ = 1 / divisor
},
wantStatus: http.StatusInternalServerError,
wantLog: true,
wantMessage: "runtime error: integer divide by zero",
wantErrorStatus: true,
},
{
name: "NoPanic",
handler: func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
wantStatus: http.StatusOK,
wantLog: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
var records []slog.Record
logger := slog.New(newRecordCollector(&records))
m := NewRecovery(logger)
handler := m.Wrap(http.HandlerFunc(tc.handler))
rr := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodGet, "/", nil)
handler.ServeHTTP(rr, req)
assert.Equal(t, tc.wantStatus, rr.Code)
if !tc.wantLog {
assert.Empty(t, records)
return
}
require.Len(t, records, 1)
err := extractException(t, records[0])
require.NotNil(t, err)
typ, _, message, _, _, _ := errors.Unwrapb(err)
assert.Equal(t, errors.TypeFatal, typ)
assert.Equal(t, tc.wantMessage, message)
type stacktracer interface {
Stacktrace() string
}
st, ok := err.(stacktracer)
require.True(t, ok, "error should implement stacktracer")
assert.NotEmpty(t, st.Stacktrace())
if tc.wantErrorStatus {
var body map[string]any
require.NoError(t, json.Unmarshal(rr.Body.Bytes(), &body))
assert.Equal(t, "error", body["status"])
}
})
}
}
// extractException finds the "exception" attr in a log record and returns the error.
func extractException(t *testing.T, record slog.Record) error {
t.Helper()
var found error
record.Attrs(func(a slog.Attr) bool {
if a.Key == "exception" {
if err, ok := a.Value.Any().(error); ok {
found = err
return false
}
}
return true
})
return found
}
// recordCollector is an slog.Handler that collects records for assertion.
type recordCollector struct {
records *[]slog.Record
attrs []slog.Attr
}
func newRecordCollector(records *[]slog.Record) *recordCollector {
return &recordCollector{records: records}
}
func (h *recordCollector) Enabled(_ context.Context, _ slog.Level) bool { return true }
func (h *recordCollector) Handle(_ context.Context, record slog.Record) error {
for _, a := range h.attrs {
record.AddAttrs(a)
}
*h.records = append(*h.records, record)
return nil
}
func (h *recordCollector) WithAttrs(attrs []slog.Attr) slog.Handler {
return &recordCollector{records: h.records, attrs: append(h.attrs, attrs...)}
}
func (h *recordCollector) WithGroup(_ string) slog.Handler { return h }

View File

@@ -6,8 +6,6 @@ import (
"net/http"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
)
const (
@@ -38,7 +36,7 @@ func NewTimeout(logger *slog.Logger, excludedRoutes []string, defaultTimeout tim
}
return &Timeout{
logger: logger.With(slog.String("pkg", pkgname)),
logger: logger.With("pkg", pkgname),
excluded: excluded,
defaultTimeout: defaultTimeout,
maxTimeout: maxTimeout,
@@ -53,7 +51,7 @@ func (middleware *Timeout) Wrap(next http.Handler) http.Handler {
if incoming != "" {
parsed, err := time.ParseDuration(strings.TrimSpace(incoming) + "s")
if err != nil {
middleware.logger.WarnContext(req.Context(), "cannot parse timeout in header, using default timeout", slog.String("timeout", incoming), errors.Attr(err))
middleware.logger.WarnContext(req.Context(), "cannot parse timeout in header, using default timeout", "timeout", incoming, "error", err)
} else {
if parsed > middleware.maxTimeout {
actual = middleware.maxTimeout

View File

@@ -64,8 +64,6 @@ func Error(rw http.ResponseWriter, cause error) {
httpCode = statusClientClosedConnection
case errors.TypeTimeout:
httpCode = http.StatusGatewayTimeout
case errors.TypeFatal:
httpCode = http.StatusInternalServerError
case errors.TypeLicenseUnavailable:
httpCode = http.StatusUnavailableForLegalReasons
}

View File

@@ -38,17 +38,17 @@ func New(logger *slog.Logger, cfg Config, handler http.Handler) (*Server, error)
return &Server{
srv: srv,
logger: logger.With(slog.String("pkg", "go.signoz.io/pkg/http/server")),
logger: logger.With("pkg", "go.signoz.io/pkg/http/server"),
handler: handler,
cfg: cfg,
}, nil
}
func (server *Server) Start(ctx context.Context) error {
server.logger.InfoContext(ctx, "starting http server", slog.String("address", server.srv.Addr))
server.logger.InfoContext(ctx, "starting http server", "address", server.srv.Addr)
if err := server.srv.ListenAndServe(); err != nil {
if err != http.ErrServerClosed {
server.logger.ErrorContext(ctx, "failed to start server", errors.Attr(err))
server.logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
}
@@ -60,7 +60,7 @@ func (server *Server) Stop(ctx context.Context) error {
defer cancel()
if err := server.srv.Shutdown(ctx); err != nil {
server.logger.ErrorContext(ctx, "failed to stop server", errors.Attr(err))
server.logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}

View File

@@ -5,14 +5,13 @@ import (
"net/http"
"time"
"golang.org/x/sync/singleflight"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"golang.org/x/sync/singleflight"
)
// todo: will move this in types layer with service account integration
@@ -119,7 +118,7 @@ func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes
Where("revoked = false").
Exec(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to update last used of api key", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to update last used of api key", "error", err)
}
return true, nil
})

View File

@@ -2,7 +2,6 @@ package identn
import (
"context"
"log/slog"
"net/http"
"github.com/SigNoz/signoz/pkg/factory"
@@ -70,7 +69,7 @@ func NewIdentNResolver(ctx context.Context, providerSettings factory.ProviderSet
func (c *identNResolver) GetIdentN(r *http.Request) IdentN {
for _, idn := range c.identNs {
if idn.Test(r) {
c.settings.Logger().DebugContext(r.Context(), "identN matched", slog.Any("provider", idn.Name()))
c.settings.Logger().DebugContext(r.Context(), "identN matched", "provider", idn.Name())
return idn
}
}

View File

@@ -6,13 +6,11 @@ import (
"strings"
"time"
"golang.org/x/sync/singleflight"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/tokenizer"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"golang.org/x/sync/singleflight"
)
type provider struct {
@@ -83,7 +81,7 @@ func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes
_, _, _ = provider.sfGroup.Do(accessToken, func() (any, error) {
if err := provider.tokenizer.SetLastObservedAt(ctx, accessToken, time.Now()); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to set last observed at", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to set last observed at", "error", err)
return false, err
}
return true, nil

View File

@@ -10,7 +10,7 @@ import (
type zapToSlogConverter struct{}
func NewLogger(config Config) *slog.Logger {
func NewLogger(config Config, wrappers ...loghandler.Wrapper) *slog.Logger {
logger := slog.New(
loghandler.New(
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: config.Logs.Level, AddSource: true, ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
@@ -27,9 +27,7 @@ func NewLogger(config Config) *slog.Logger {
return a
}}),
loghandler.NewCorrelation(),
loghandler.NewFiltering(),
loghandler.NewException(),
wrappers...,
),
)

View File

@@ -1,53 +0,0 @@
package loghandler
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
)
// exception is a log-handler wrapper that rewrites an "exception" attribute
// holding an error into structured exception.* attributes (see Wrap).
type exception struct{}

// NewException returns a new exception wrapper.
func NewException() *exception {
	return &exception{}
}
// Wrap returns a LogHandler that inspects each record for an "exception"
// attribute carrying an error. When found, the raw attribute is dropped and
// replaced by exception.type, exception.code, exception.message and, when the
// error exposes one, exception.stacktrace. Records without such an attribute
// are forwarded untouched.
func (h *exception) Wrap(next LogHandler) LogHandler {
	return LogHandlerFunc(func(ctx context.Context, record slog.Record) error {
		var exceptionErr error
		stripped := slog.NewRecord(record.Time, record.Level, record.Message, record.PC)
		record.Attrs(func(attr slog.Attr) bool {
			if attr.Key == "exception" {
				if e, ok := attr.Value.Any().(error); ok {
					// Remember the error and drop the raw attr; it is
					// re-emitted below in structured form.
					exceptionErr = e
					return true
				}
			}
			// Everything else (including a non-error "exception" value) is
			// copied through verbatim.
			stripped.AddAttrs(attr)
			return true
		})

		// Nothing to rewrite: forward the original record as-is.
		if exceptionErr == nil {
			return next.Handle(ctx, record)
		}

		typ, code, msg, _, _, _ := errors.Unwrapb(exceptionErr)
		stripped.AddAttrs(
			slog.String("exception.type", typ.String()),
			slog.String("exception.code", code.String()),
			slog.String("exception.message", msg),
		)

		// Prefer the stacktrace captured at error creation time if available.
		type stacktracer interface {
			Stacktrace() string
		}
		if st, ok := exceptionErr.(stacktracer); ok && st.Stacktrace() != "" {
			stripped.AddAttrs(slog.String("exception.stacktrace", st.Stacktrace()))
		}
		return next.Handle(ctx, stripped)
	})
}

View File

@@ -1,100 +0,0 @@
package loghandler
import (
"bytes"
"context"
"encoding/json"
"log/slog"
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestException verifies that the exception wrapper rewrites an "exception"
// attribute carrying an error into exception.type / exception.code /
// exception.message (plus exception.stacktrace), and leaves records without
// such an error attribute untouched.
func TestException(t *testing.T) {
	testCases := []struct {
		name             string
		attrs            []slog.Attr // attributes logged with the record
		exceptionType    string      // expected exception.type when hasException
		exceptionCode    string      // expected exception.code when hasException
		exceptionMessage string      // expected exception.message when hasException
		hasException     bool        // whether exception.* fields should be emitted
	}{
		{
			// An errors-package error under the "exception" key is unpacked.
			name: "PkgError",
			attrs: []slog.Attr{
				errors.Attr(errors.New(errors.TypeNotFound, errors.MustNewCode("test_code"), "resource not found")),
			},
			exceptionType:    "not-found",
			exceptionCode:    "test_code",
			exceptionMessage: "resource not found",
			hasException:     true,
		},
		{
			name: "StdlibError",
			attrs: []slog.Attr{
				errors.Attr(errors.Newf(errors.TypeInternal, errors.MustNewCode("internal"), "something went wrong")),
			},
			exceptionType:    "internal",
			exceptionCode:    "internal",
			exceptionMessage: "something went wrong",
			hasException:     true,
		},
		{
			// For wrapped errors the outermost wrap's type/code/message win.
			name: "WrappedPkgError",
			attrs: []slog.Attr{
				errors.Attr(errors.Wrapf(errors.New(errors.TypeNotFound, errors.MustNewCode("not_found"), "db connection failed"), errors.TypeInternal, errors.MustNewCode("db_error"), "failed to fetch user")),
			},
			exceptionType:    "internal",
			exceptionCode:    "db_error",
			exceptionMessage: "failed to fetch user",
			hasException:     true,
		},
		{
			// No "exception" attribute: record passes through unchanged.
			name: "NoError",
			attrs: []slog.Attr{
				slog.String("key", "value"),
			},
			hasException: false,
		},
		{
			// An "exception" key whose value is not an error is not rewritten.
			name: "ExceptionKeyWithNonError",
			attrs: []slog.Attr{
				slog.String("exception", "not an error type"),
			},
			hasException: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			buf := bytes.NewBuffer(nil)
			// JSON handler wrapped with the exception wrapper under test.
			logger := slog.New(&handler{
				base:     slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}),
				wrappers: []Wrapper{NewException()},
			})
			logger.LogAttrs(context.Background(), slog.LevelError, "operation failed", tc.attrs...)
			// Decode the single emitted JSON log line.
			m := make(map[string]any)
			err := json.Unmarshal(buf.Bytes(), &m)
			require.NoError(t, err)
			assert.Equal(t, "operation failed", m["msg"])
			if tc.hasException {
				assert.Equal(t, tc.exceptionType, m["exception.type"])
				assert.Equal(t, tc.exceptionCode, m["exception.code"])
				assert.Equal(t, tc.exceptionMessage, m["exception.message"])
				// The stacktrace is captured where the error was constructed,
				// i.e. in this test file.
				stacktrace, ok := m["exception.stacktrace"].(string)
				require.True(t, ok)
				assert.Contains(t, stacktrace, "exception_test.go:")
			} else {
				assert.Nil(t, m["exception.type"])
				assert.Nil(t, m["exception.code"])
				assert.Nil(t, m["exception.message"])
				assert.Nil(t, m["exception.stacktrace"])
			}
		})
	}
}

View File

@@ -18,7 +18,7 @@ func TestFiltering_SuppressesContextCanceled(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{filtering}})
logger.ErrorContext(context.Background(), "operation failed", slog.Any("error", context.Canceled))
logger.ErrorContext(context.Background(), "operation failed", "error", context.Canceled)
assert.Empty(t, buf.String(), "log with context.Canceled should be suppressed")
}
@@ -29,7 +29,7 @@ func TestFiltering_AllowsOtherErrors(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{filtering}})
logger.ErrorContext(context.Background(), "operation failed", slog.Any("error", errors.New(errors.TypeInternal, errors.CodeInternal, "some other error")))
logger.ErrorContext(context.Background(), "operation failed", "error", errors.New(errors.TypeInternal, errors.CodeInternal, "some other error"))
m := make(map[string]any)
err := json.Unmarshal(buf.Bytes(), &m)
@@ -43,7 +43,7 @@ func TestFiltering_AllowsLogsWithoutErrors(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{filtering}})
logger.InfoContext(context.Background(), "normal log", slog.String("key", "value"))
logger.InfoContext(context.Background(), "normal log", "key", "value")
m := make(map[string]any)
err := json.Unmarshal(buf.Bytes(), &m)

View File

@@ -6,6 +6,7 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation/loghandler"
"github.com/SigNoz/signoz/pkg/version"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
@@ -115,7 +116,7 @@ func New(ctx context.Context, cfg Config, build version.Build, serviceName strin
meterProvider: meterProvider,
meterProviderShutdownFunc: meterProviderShutdownFunc,
prometheusRegistry: prometheusRegistry,
logger: NewLogger(cfg),
logger: NewLogger(cfg, loghandler.NewCorrelation(), loghandler.NewFiltering()),
startCh: make(chan struct{}),
}, nil
}

View File

@@ -0,0 +1,134 @@
package implcloudintegration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// store is a sqlstore-backed implementation of cloudintegrationtypes.Store.
type store struct {
	store sqlstore.SQLStore
}

// NewStore wraps the given SQL store in a cloud-integration store.
func NewStore(sqlStore sqlstore.SQLStore) cloudintegrationtypes.Store {
	return &store{store: sqlStore}
}
// GetAccountByID fetches a single cloud integration row by id, scoped to the
// given org and provider. A failed scan is surfaced as a not-found error.
func (s *store) GetAccountByID(ctx context.Context, orgID, id valuer.UUID, provider cloudintegrationtypes.CloudProviderType) (*cloudintegrationtypes.StorableCloudIntegration, error) {
	account := new(cloudintegrationtypes.StorableCloudIntegration)
	query := s.store.BunDBCtx(ctx).
		NewSelect().
		Model(account).
		Where("id = ?", id).
		Where("org_id = ?", orgID).
		Where("provider = ?", provider)
	if err := query.Scan(ctx); err != nil {
		return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "cloud integration account with id %s not found", id)
	}
	return account, nil
}
// ListConnectedAccounts returns, oldest first, every non-removed integration
// for the org/provider that has an account id and at least one agent report.
func (s *store) ListConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) ([]*cloudintegrationtypes.StorableCloudIntegration, error) {
	var accounts []*cloudintegrationtypes.StorableCloudIntegration
	query := s.store.BunDBCtx(ctx).
		NewSelect().
		Model(&accounts).
		Where("org_id = ?", orgID).
		Where("provider = ?", provider).
		Where("removed_at IS NULL").
		Where("account_id IS NOT NULL").
		Where("last_agent_report IS NOT NULL").
		Order("created_at ASC")
	if err := query.Scan(ctx); err != nil {
		return nil, err
	}
	return accounts, nil
}
// CreateAccount inserts the given integration row and returns it. An insert
// failure is surfaced as an already-exists error.
func (s *store) CreateAccount(ctx context.Context, account *cloudintegrationtypes.StorableCloudIntegration) (*cloudintegrationtypes.StorableCloudIntegration, error) {
	if _, err := s.store.BunDBCtx(ctx).NewInsert().Model(account).Exec(ctx); err != nil {
		return nil, s.store.WrapAlreadyExistsErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationAlreadyExists, "cloud integration account with id %s already exists", account.ID)
	}
	return account, nil
}
// UpdateAccount updates the row matching the account's primary key, further
// scoped to its org and provider. Note: updating zero rows is not an error.
func (s *store) UpdateAccount(ctx context.Context, account *cloudintegrationtypes.StorableCloudIntegration) error {
	query := s.store.BunDBCtx(ctx).
		NewUpdate().
		Model(account).
		WherePK().
		Where("org_id = ?", account.OrgID).
		Where("provider = ?", account.Provider)
	_, err := query.Exec(ctx)
	return err
}
// RemoveAccount soft-deletes the integration by stamping removed_at with the
// current time; the row itself is kept. Matching zero rows is not an error.
func (s *store) RemoveAccount(ctx context.Context, orgID, id valuer.UUID, provider cloudintegrationtypes.CloudProviderType) error {
	query := s.store.BunDBCtx(ctx).
		NewUpdate().
		Model(new(cloudintegrationtypes.StorableCloudIntegration)).
		Set("removed_at = ?", time.Now()).
		Where("id = ?", id).
		Where("org_id = ?", orgID).
		Where("provider = ?", provider)
	_, err := query.Exec(ctx)
	return err
}
// GetConnectedAccount fetches the non-removed integration for the given
// org/provider whose provider-side account id matches and which has reported
// via an agent at least once. A failed scan is surfaced as not-found.
func (s *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
	account := new(cloudintegrationtypes.StorableCloudIntegration)
	query := s.store.BunDBCtx(ctx).
		NewSelect().
		Model(account).
		Where("org_id = ?", orgID).
		Where("provider = ?", provider).
		Where("account_id = ?", providerAccountID).
		Where("last_agent_report IS NOT NULL").
		Where("removed_at IS NULL")
	if err := query.Scan(ctx); err != nil {
		return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
	}
	return account, nil
}
// GetServiceByServiceID fetches the service config row for the given
// integration. The service id is matched against the "type" column. A failed
// scan is surfaced as a service-not-found error.
func (s *store) GetServiceByServiceID(ctx context.Context, cloudIntegrationID valuer.UUID, serviceID cloudintegrationtypes.ServiceID) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
	service := new(cloudintegrationtypes.StorableCloudIntegrationService)
	query := s.store.BunDBCtx(ctx).
		NewSelect().
		Model(service).
		Where("cloud_integration_id = ?", cloudIntegrationID).
		Where("type = ?", serviceID)
	if err := query.Scan(ctx); err != nil {
		return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationServiceNotFound, "cloud integration service with id %s not found", serviceID)
	}
	return service, nil
}
// ListServices returns every service config row belonging to the given
// integration.
func (s *store) ListServices(ctx context.Context, cloudIntegrationID valuer.UUID) ([]*cloudintegrationtypes.StorableCloudIntegrationService, error) {
	var services []*cloudintegrationtypes.StorableCloudIntegrationService
	query := s.store.BunDBCtx(ctx).
		NewSelect().
		Model(&services).
		Where("cloud_integration_id = ?", cloudIntegrationID)
	if err := query.Scan(ctx); err != nil {
		return nil, err
	}
	return services, nil
}
// CreateService inserts the given service config row and returns it. An
// insert failure is surfaced as an already-exists error.
func (s *store) CreateService(ctx context.Context, service *cloudintegrationtypes.StorableCloudIntegrationService) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
	if _, err := s.store.BunDBCtx(ctx).NewInsert().Model(service).Exec(ctx); err != nil {
		return nil, s.store.WrapAlreadyExistsErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationServiceAlreadyExists, "cloud integration service with id %s already exists for integration account", service.Type)
	}
	return service, nil
}
// UpdateService updates the row matching the service's primary key, further
// scoped to its integration and type. Updating zero rows is not an error.
func (s *store) UpdateService(ctx context.Context, service *cloudintegrationtypes.StorableCloudIntegrationService) error {
	query := s.store.BunDBCtx(ctx).
		NewUpdate().
		Model(service).
		WherePK().
		Where("cloud_integration_id = ?", service.CloudIntegrationID).
		Where("type = ?", service.Type)
	_, err := query.Exec(ctx)
	return err
}

View File

@@ -2,7 +2,6 @@ package impldashboard
import (
"context"
"log/slog"
"slices"
"github.com/SigNoz/signoz/pkg/analytics"
@@ -309,7 +308,7 @@ func (module *module) checkClickHouseQueriesForMetricNames(ctx context.Context,
result, err := module.queryParser.AnalyzeQueryFilter(ctx, qbtypes.QueryTypeClickHouseSQL, queryStr)
if err != nil {
// Log warning and continue - parsing errors shouldn't break the search
module.settings.Logger().WarnContext(ctx, "failed to parse ClickHouse query", slog.String("query", queryStr), errors.Attr(err))
module.settings.Logger().WarnContext(ctx, "failed to parse ClickHouse query", "query", queryStr, "error", err)
continue
}
@@ -344,7 +343,7 @@ func (module *module) checkPromQLQueriesForMetricNames(ctx context.Context, quer
result, err := module.queryParser.AnalyzeQueryFilter(ctx, qbtypes.QueryTypePromQL, queryStr)
if err != nil {
// Log warning and continue - parsing errors shouldn't break the search
module.settings.Logger().WarnContext(ctx, "failed to parse PromQL query", slog.String("query", queryStr), errors.Attr(err))
module.settings.Logger().WarnContext(ctx, "failed to parse PromQL query", "query", queryStr, "error", err)
continue
}

View File

@@ -8,9 +8,6 @@ import (
"strings"
"time"
sqlbuilder "github.com/huandu/go-sqlbuilder"
"golang.org/x/sync/errgroup"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
@@ -28,6 +25,8 @@ import (
"github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
sqlbuilder "github.com/huandu/go-sqlbuilder"
"golang.org/x/sync/errgroup"
)
type module struct {
@@ -511,7 +510,7 @@ func (m *module) fetchMetadataFromCache(ctx context.Context, orgID valuer.UUID,
if err := m.cache.Get(ctx, orgID, cacheKey, &cachedMetadata); err == nil {
hits[metricName] = &cachedMetadata
} else {
m.logger.WarnContext(ctx, "cache miss for metric metadata", slog.String("metric_name", metricName), errors.Attr(err))
m.logger.WarnContext(ctx, "cache miss for metric metadata", "metric_name", metricName, "error", err)
misses = append(misses, metricName)
}
}
@@ -567,7 +566,7 @@ func (m *module) fetchUpdatedMetadata(ctx context.Context, orgID valuer.UUID, me
cacheKey := generateMetricMetadataCacheKey(metricName)
if err := m.cache.Set(ctx, orgID, cacheKey, &metricMetadata, 0); err != nil {
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", slog.String("metric_name", metricName), errors.Attr(err))
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", "metric_name", metricName, "error", err)
}
}
@@ -627,7 +626,7 @@ func (m *module) fetchTimeseriesMetadata(ctx context.Context, orgID valuer.UUID,
cacheKey := generateMetricMetadataCacheKey(metricName)
if err := m.cache.Set(ctx, orgID, cacheKey, &metricMetadata, 0); err != nil {
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", slog.String("metric_name", metricName), errors.Attr(err))
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", "metric_name", metricName, "error", err)
}
}
@@ -766,7 +765,7 @@ func (m *module) insertMetricsMetadata(ctx context.Context, orgID valuer.UUID, r
}
cacheKey := generateMetricMetadataCacheKey(req.MetricName)
if err := m.cache.Set(ctx, orgID, cacheKey, metricMetadata, 0); err != nil {
m.logger.WarnContext(ctx, "failed to set metric metadata in cache after insert", slog.String("metric_name", req.MetricName), errors.Attr(err))
m.logger.WarnContext(ctx, "failed to set metric metadata in cache after insert", "metric_name", req.MetricName, "error", err)
}
return nil

View File

@@ -273,7 +273,7 @@ func (module *module) CreateFactorAPIKey(ctx context.Context, factorAPIKey *serv
"KeyID": factorAPIKey.ID.String(),
"KeyCreatedAt": factorAPIKey.CreatedAt.String(),
}); err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to send email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send email", "error", err)
}
return nil
@@ -328,7 +328,7 @@ func (module *module) RevokeFactorAPIKey(ctx context.Context, serviceAccountID v
"KeyID": factorAPIKey.ID.String(),
"KeyCreatedAt": factorAPIKey.CreatedAt.String(),
}); err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to send email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send email", "error", err)
}
return nil

View File

@@ -2,7 +2,6 @@ package implsession
import (
"context"
"log/slog"
"net/url"
"slices"
"strings"
@@ -133,7 +132,7 @@ func (module *module) CreateCallbackAuthNSession(ctx context.Context, authNProvi
callbackIdentity, err := callbackAuthN.HandleCallback(ctx, values)
if err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to handle callback", errors.Attr(err), slog.Any("authn_provider", authNProvider))
module.settings.Logger().ErrorContext(ctx, "failed to handle callback", "error", err, "authn_provider", authNProvider)
return "", err
}

View File

@@ -2,13 +2,10 @@ package impluser
import (
"context"
"log/slog"
"slices"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/emailing"
@@ -23,6 +20,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/emailtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/dustin/go-humanize"
)
type Module struct {
@@ -109,7 +107,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
// generate reset password token
resetPasswordToken, err := m.GetOrCreateResetPasswordToken(ctx, newUser.ID)
if err != nil {
m.settings.Logger().ErrorContext(ctx, "failed to create reset password token for invited user", errors.Attr(err))
m.settings.Logger().ErrorContext(ctx, "failed to create reset password token for invited user", "error", err)
return err
}
@@ -151,7 +149,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
frontendBaseUrl := bulkInvites.Invites[idx].FrontendBaseUrl
if frontendBaseUrl == "" {
m.settings.Logger().InfoContext(ctx, "frontend base url is not provided, skipping email", slog.Any("invitee_email", userWithToken.User.Email))
m.settings.Logger().InfoContext(ctx, "frontend base url is not provided, skipping email", "invitee_email", userWithToken.User.Email)
continue
}
@@ -165,7 +163,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
"link": resetLink,
"Expiry": humanizedTokenLifetime,
}); err != nil {
m.settings.Logger().ErrorContext(ctx, "failed to send invite email", errors.Attr(err))
m.settings.Logger().ErrorContext(ctx, "failed to send invite email", "error", err)
}
}
@@ -407,7 +405,7 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
token, err := module.GetOrCreateResetPasswordToken(ctx, user.ID)
if err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token", "error", err)
return err
}
@@ -429,7 +427,7 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
"Expiry": humanizedTokenLifetime,
},
); err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to send reset password email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send reset password email", "error", err)
return nil
}

View File

@@ -60,7 +60,7 @@ func (s *service) Start(ctx context.Context) error {
return nil
}
s.settings.Logger().WarnContext(ctx, "root user reconciliation failed, retrying", errors.Attr(err))
s.settings.Logger().WarnContext(ctx, "root user reconciliation failed, retrying", "error", err)
select {
case <-s.stopC:

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"runtime/debug"
"strconv"
"github.com/SigNoz/signoz/pkg/analytics"
@@ -50,6 +51,26 @@ func (handler *handler) QueryRange(rw http.ResponseWriter, req *http.Request) {
return
}
defer func() {
if r := recover(); r != nil {
stackTrace := string(debug.Stack())
queryJSON, _ := json.Marshal(queryRangeRequest)
handler.set.Logger.ErrorContext(ctx, "panic in QueryRange",
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(
errors.CodeInternal,
"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
))
}
}()
// Validate the query request
if err := queryRangeRequest.Validate(); err != nil {
render.Error(rw, err)
@@ -130,6 +151,26 @@ func (handler *handler) QueryRawStream(rw http.ResponseWriter, req *http.Request
return
}
defer func() {
if r := recover(); r != nil {
stackTrace := string(debug.Stack())
queryJSON, _ := json.Marshal(queryRangeRequest)
handler.set.Logger.ErrorContext(ctx, "panic in QueryRawStream",
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(
errors.CodeInternal,
"Something went wrong on our end. It's not you, it's us. Our team is notified about it. Reach out to support if issue persists.",
))
}
}()
// Validate the query request
if err := queryRangeRequest.Validate(); err != nil {
render.Error(rw, err)

View File

@@ -47,19 +47,19 @@ func (bc *bucketCache) GetMissRanges(
// Get query window
startMs, endMs := q.Window()
bc.logger.DebugContext(ctx, "getting miss ranges", slog.String("fingerprint", q.Fingerprint()), slog.Uint64("start", startMs), slog.Uint64("end", endMs))
bc.logger.DebugContext(ctx, "getting miss ranges", "fingerprint", q.Fingerprint(), "start", startMs, "end", endMs)
// Generate cache key
cacheKey := bc.generateCacheKey(q)
bc.logger.DebugContext(ctx, "cache key", slog.String("cache_key", cacheKey))
bc.logger.DebugContext(ctx, "cache key", "cache_key", cacheKey)
// Try to get cached data
var data qbtypes.CachedData
err := bc.cache.Get(ctx, orgID, cacheKey, &data)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
bc.logger.ErrorContext(ctx, "error getting cached data", errors.Attr(err))
bc.logger.ErrorContext(ctx, "error getting cached data", "error", err)
}
// No cached data, need to fetch entire range
missing = []*qbtypes.TimeRange{{From: startMs, To: endMs}}
@@ -71,7 +71,7 @@ func (bc *bucketCache) GetMissRanges(
// Find missing ranges with step alignment
missing = bc.findMissingRangesWithStep(data.Buckets, startMs, endMs, stepMs)
bc.logger.DebugContext(ctx, "missing ranges", slog.Any("missing", missing), slog.Uint64("step", stepMs))
bc.logger.DebugContext(ctx, "missing ranges", "missing", missing, "step", stepMs)
// If no cached data overlaps with requested range, return empty result
if len(data.Buckets) == 0 {
@@ -105,9 +105,9 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
// If the entire range is within flux interval, skip caching
if startMs >= fluxBoundary {
bc.logger.DebugContext(ctx, "entire range within flux interval, skipping cache",
slog.Uint64("start", startMs),
slog.Uint64("end", endMs),
slog.Uint64("flux_boundary", fluxBoundary))
"start", startMs,
"end", endMs,
"flux_boundary", fluxBoundary)
return
}
@@ -116,8 +116,8 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
if endMs > fluxBoundary {
cachableEndMs = fluxBoundary
bc.logger.DebugContext(ctx, "adjusting end time to exclude flux interval",
slog.Uint64("original_end", endMs),
slog.Uint64("cachable_end", cachableEndMs))
"original_end", endMs,
"cachable_end", cachableEndMs)
}
// Generate cache key
@@ -155,11 +155,11 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
// If after adjustment we have no complete intervals, don't cache
if cachableStartMs >= cachableEndMs {
bc.logger.DebugContext(ctx, "no complete intervals to cache",
slog.Uint64("original_start", startMs),
slog.Uint64("original_end", endMs),
slog.Uint64("adjusted_start", cachableStartMs),
slog.Uint64("adjusted_end", cachableEndMs),
slog.Uint64("step", stepMs))
"original_start", startMs,
"original_end", endMs,
"adjusted_start", cachableStartMs,
"adjusted_end", cachableEndMs,
"step", stepMs)
return
}
}
@@ -187,7 +187,7 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
// Marshal and store in cache
if err := bc.cache.Set(ctx, orgID, cacheKey, &updatedData, bc.cacheTTL); err != nil {
bc.logger.ErrorContext(ctx, "error setting cached data", errors.Attr(err))
bc.logger.ErrorContext(ctx, "error setting cached data", "error", err)
}
}
@@ -471,7 +471,7 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*qbt
for _, bucket := range buckets {
var tsData *qbtypes.TimeSeriesData
if err := json.Unmarshal(bucket.Value, &tsData); err != nil {
bc.logger.ErrorContext(ctx, "failed to unmarshal time series data", errors.Attr(err))
bc.logger.ErrorContext(ctx, "failed to unmarshal time series data", "error", err)
continue
}
@@ -623,7 +623,7 @@ func (bc *bucketCache) resultToBuckets(ctx context.Context, result *qbtypes.Resu
// In the future, we could split large ranges into smaller buckets
valueBytes, err := json.Marshal(result.Value)
if err != nil {
bc.logger.ErrorContext(ctx, "failed to marshal result value", errors.Attr(err))
bc.logger.ErrorContext(ctx, "failed to marshal result value", "error", err)
return nil
}

View File

@@ -3,15 +3,12 @@ package querier
import (
"context"
"fmt"
"log/slog"
"math"
"slices"
"sort"
"strings"
"github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -361,14 +358,14 @@ func (q *querier) processTimeSeriesFormula(
// Create formula evaluator
evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
if err != nil {
q.logger.ErrorContext(ctx, "failed to create formula evaluator", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to create formula evaluator", "error", err, "formula", formula.Name)
return nil
}
// Evaluate the formula
formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
if err != nil {
q.logger.ErrorContext(ctx, "failed to evaluate formula", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to evaluate formula", "error", err, "formula", formula.Name)
return nil
}
@@ -511,13 +508,13 @@ func (q *querier) processScalarFormula(
canDefaultZero := req.GetQueriesSupportingZeroDefault()
evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
if err != nil {
q.logger.ErrorContext(ctx, "failed to create formula evaluator", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to create formula evaluator", "error", err, "formula", formula.Name)
return nil
}
formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
if err != nil {
q.logger.ErrorContext(ctx, "failed to evaluate formula", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to evaluate formula", "error", err, "formula", formula.Name)
return nil
}

View File

@@ -11,10 +11,6 @@ import (
"text/template"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/querybuilder"
@@ -22,6 +18,9 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
qbv5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)
// unquotedDottedNamePattern matches unquoted identifiers containing dots
@@ -93,7 +92,7 @@ func newPromqlQuery(
func (q *promqlQuery) Fingerprint() string {
query, err := q.renderVars(q.query.Query, q.vars, q.tr.From, q.tr.To)
if err != nil {
q.logger.ErrorContext(context.TODO(), "failed render template variables", slog.String("query", q.query.Query))
q.logger.ErrorContext(context.TODO(), "failed render template variables", "query", q.query.Query)
return ""
}
parts := []string{
@@ -136,7 +135,7 @@ func (q *promqlQuery) removeAllVarMatchers(query string, vars map[string]qbv5.Va
// Create visitor and walk the AST
visitor := &allVarRemover{allVars: allVars}
if err := parser.Walk(visitor, expr, nil); err != nil {
q.logger.ErrorContext(context.TODO(), "unexpected error while removing __all__ variable matchers", errors.Attr(err), slog.String("query", query))
q.logger.ErrorContext(context.TODO(), "unexpected error while removing __all__ variable matchers", "error", err, "query", query)
return "", errors.WrapInternalf(err, errors.CodeInternal, "error while removing __all__ variable matchers")
}

View File

@@ -10,9 +10,6 @@ import (
"sync"
"time"
"github.com/dustin/go-humanize"
"golang.org/x/exp/maps"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -23,6 +20,8 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/dustin/go-humanize"
"golang.org/x/exp/maps"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
@@ -369,10 +368,10 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
var err error
metricTemporality, metricTypes, err = q.metadataStore.FetchTemporalityAndTypeMulti(ctx, req.Start, req.End, metricNames...)
if err != nil {
q.logger.WarnContext(ctx, "failed to fetch metric temporality", errors.Attr(err), slog.Any("metrics", metricNames))
q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
return nil, errors.NewInternalf(errors.CodeInternal, "failed to fetch metrics temporality")
}
q.logger.DebugContext(ctx, "fetched metric temporalities and types", slog.Any("metric_temporality", metricTemporality), slog.Any("metric_types", metricTypes))
q.logger.DebugContext(ctx, "fetched metric temporalities and types", "metric_temporality", metricTemporality, "metric_types", metricTypes)
}
for i := range spec.Aggregations {
if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Temporality == metrictypes.Unknown {
@@ -591,9 +590,9 @@ func (q *querier) run(
// Skip cache if NoCache is set, or if cache is not available
if req.NoCache || q.bucketCache == nil || query.Fingerprint() == "" {
if req.NoCache {
q.logger.DebugContext(ctx, "NoCache flag set, bypassing cache", slog.String("query", name))
q.logger.DebugContext(ctx, "NoCache flag set, bypassing cache", "query", name)
} else {
q.logger.InfoContext(ctx, "no bucket cache or fingerprint, executing query", slog.String("fingerprint", query.Fingerprint()))
q.logger.InfoContext(ctx, "no bucket cache or fingerprint, executing query", "fingerprint", query.Fingerprint())
}
result, err := query.Execute(ctx)
qbEvent.HasData = qbEvent.HasData || hasData(result)
@@ -710,8 +709,8 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
totalStats := qbtypes.ExecStats{}
q.logger.DebugContext(ctx, "executing queries for missing ranges",
slog.Int("missing_ranges_count", len(missingRanges)),
slog.Any("ranges", missingRanges))
"missing_ranges_count", len(missingRanges),
"ranges", missingRanges)
sem := make(chan struct{}, 4)
var wg sync.WaitGroup
@@ -749,7 +748,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
for _, err := range errs {
if err != nil {
// If any query failed, fall back to full execution
q.logger.ErrorContext(ctx, "parallel query execution failed", errors.Attr(err))
q.logger.ErrorContext(ctx, "parallel query execution failed", "error", err)
result, err := query.Execute(ctx)
if err != nil {
return nil, err

View File

@@ -9,14 +9,13 @@ import (
"log/slog"
"golang.org/x/exp/slices"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/slices"
)
var (
@@ -136,7 +135,7 @@ func (r *Repo) insertConfig(
configVersion, err := r.GetLatestVersion(ctx, orgId, c.ElementType)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
slog.ErrorContext(ctx, "failed to fetch latest config version", errors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch latest config version", "error", err)
return err
}
@@ -156,11 +155,11 @@ func (r *Repo) insertConfig(
// Delete elements first, then version (to respect potential foreign key constraints)
_, delErr := r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigElement)).Where("version_id = ?", c.ID).Exec(ctx)
if delErr != nil {
slog.ErrorContext(ctx, "failed to delete config elements during cleanup", errors.Attr(delErr), "version_id", c.ID.String())
slog.ErrorContext(ctx, "failed to delete config elements during cleanup", "error", delErr, "version_id", c.ID.String())
}
_, delErr = r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigVersion)).Where("id = ?", c.ID).Where("org_id = ?", orgId).Exec(ctx)
if delErr != nil {
slog.ErrorContext(ctx, "failed to delete config version during cleanup", errors.Attr(delErr), "version_id", c.ID.String())
slog.ErrorContext(ctx, "failed to delete config version during cleanup", "error", delErr, "version_id", c.ID.String())
}
}
}()
@@ -171,7 +170,7 @@ func (r *Repo) insertConfig(
Model(c).
Exec(ctx)
if dbErr != nil {
slog.ErrorContext(ctx, "error in inserting config version", errors.Attr(dbErr))
slog.ErrorContext(ctx, "error in inserting config version", "error", dbErr)
return errors.WrapInternalf(dbErr, CodeConfigVersionInsertFailed, "failed to insert config version")
}
@@ -222,7 +221,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context,
Where("org_id = ?", orgId).
Exec(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to update deploy status", errors.Attr(err))
slog.ErrorContext(ctx, "failed to update deploy status", "error", err)
return model.BadRequest(fmt.Errorf("failed to update deploy status"))
}
@@ -259,7 +258,7 @@ func (r *Repo) updateDeployStatusByHash(
Where("org_id = ?", orgId).
Exec(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to update deploy status", errors.Attr(err))
slog.ErrorContext(ctx, "failed to update deploy status", "error", err)
return errors.WrapInternalf(err, CodeConfigDeployStatusUpdateFailed, "failed to update deploy status")
}

View File

@@ -9,9 +9,6 @@ import (
"sync"
"sync/atomic"
"github.com/google/uuid"
yaml "gopkg.in/yaml.v3"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
filterprocessor "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/filterprocessor"
@@ -20,6 +17,8 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
yaml "gopkg.in/yaml.v3"
)
var m *Manager
@@ -186,6 +185,7 @@ func (m *Manager) GetDeployStatusByHash(ctx context.Context, orgId valuer.UUID,
return m.Repo.GetDeployStatusByHash(ctx, orgId, configHash)
}
func GetLatestVersion(
ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType,
) (*opamptypes.AgentConfigVersion, error) {
@@ -230,7 +230,7 @@ func NotifyConfigUpdate(ctx context.Context) {
func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, version int) error {
configVersion, err := GetConfigVersion(ctx, orgId, typ, version)
if err != nil {
slog.ErrorContext(ctx, "failed to fetch config version during redeploy", errors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch config version during redeploy", "error", err)
return err
}
@@ -242,7 +242,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
case opamptypes.ElementTypeSamplingRules:
var config *tsp.Config
if err := yaml.Unmarshal([]byte(configVersion.Config), &config); err != nil {
slog.DebugContext(ctx, "failed to read last conf correctly", errors.Attr(err))
slog.DebugContext(ctx, "failed to read last conf correctly", "error", err)
return model.BadRequest(fmt.Errorf("failed to read the stored config correctly"))
}
@@ -254,7 +254,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return errors.WithAdditionalf(err, "failed to deploy the config")
}
@@ -262,7 +262,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
case opamptypes.ElementTypeDropRules:
var filterConfig *filterprocessor.Config
if err := yaml.Unmarshal([]byte(configVersion.Config), &filterConfig); err != nil {
slog.ErrorContext(ctx, "failed to read last conf correctly", errors.Attr(err))
slog.ErrorContext(ctx, "failed to read last conf correctly", "error", err)
return model.InternalError(fmt.Errorf("failed to read the stored config correctly"))
}
processorConf := map[string]interface{}{
@@ -272,7 +272,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return err
}
@@ -298,13 +298,13 @@ func UpsertFilterProcessor(ctx context.Context, orgId valuer.UUID, version int,
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", errors.Attr(yamlErr))
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", "error", yamlErr)
}
m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeDropRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, string(processorConfYaml))
@@ -349,13 +349,13 @@ func UpsertSamplingProcessor(ctx context.Context, orgId valuer.UUID, version int
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", errors.Attr(yamlErr))
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", "error", yamlErr)
}
m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeSamplingRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, string(processorConfYaml))

View File

@@ -9,7 +9,6 @@ import (
"strings"
"github.com/SigNoz/signoz-otel-collector/utils/fingerprint"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)
@@ -79,7 +78,7 @@ func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs(
)
if err != nil {
// Do not fail the entire request if only example query generation fails
r.logger.ErrorContext(ctx, "could not find attribute values for creating example query", errorsV2.Attr(err))
r.logger.ErrorContext(ctx, "could not find attribute values for creating example query", "error", err)
} else {
// add example queries for as many attributes as possible.
@@ -184,7 +183,7 @@ func (r *ClickHouseReader) getValuesForLogAttributes(
rows, err := r.db.Query(ctx, query, tagKeyQueryArgs...)
if err != nil {
r.logger.ErrorContext(ctx, "couldn't query attrib values for suggestions", errorsV2.Attr(err))
r.logger.ErrorContext(ctx, "couldn't query attrib values for suggestions", "error", err)
return nil, model.InternalError(fmt.Errorf(
"couldn't query attrib values for suggestions: %w", err,
))

View File

@@ -15,8 +15,6 @@ import (
"sync"
"time"
"github.com/uptrace/bun"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/query-service/model/metrics_explorer"
"github.com/SigNoz/signoz/pkg/sqlstore"
@@ -26,18 +24,17 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/uptrace/bun"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"github.com/SigNoz/signoz/pkg/cache"
"log/slog"
@@ -324,7 +321,7 @@ func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, start, end
rows, err := r.db.Query(ctx, query, clickhouse.Named("start", start), clickhouse.Named("services", services))
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -396,7 +393,7 @@ func (r *ClickHouseReader) buildResourceSubQuery(tags []model.TagQueryParam, svc
v3.AttributeKey{},
false)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", err
}
return resourceSubQuery, nil
@@ -476,7 +473,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
resourceSubQuery, err := r.buildResourceSubQuery(queryParams.Tags, svc, *queryParams.Start, *queryParams.End)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return
}
query += `
@@ -501,7 +498,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
}
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return
}
@@ -513,7 +510,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return
}
@@ -796,7 +793,7 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo
resourceSubQuery, err := r.buildResourceSubQuery(queryParams.Tags, queryParams.ServiceName, *queryParams.Start, *queryParams.End)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
query += `
@@ -813,7 +810,7 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo
err = r.db.Select(ctx, &topOperationsItems, query, namedArgs...)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -850,7 +847,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query")
}
@@ -880,7 +877,7 @@ func (r *ClickHouseReader) GetSpansForTrace(ctx context.Context, traceID string,
if err == sql.ErrNoRows {
return []model.SpanItemV2{}, nil
}
r.logger.Error("Error in processing trace summary sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing trace summary sql query", "error", err)
return nil, model.ExecutionError(fmt.Errorf("error in processing trace summary sql query: %w", err))
}
@@ -889,7 +886,7 @@ func (r *ClickHouseReader) GetSpansForTrace(ctx context.Context, traceID string,
err = r.db.Select(ctx, &searchScanResponses, traceDetailsQuery, traceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))
r.logger.Info(traceDetailsQuery)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, model.ExecutionError(fmt.Errorf("error in processing trace data sql query: %w", err))
}
r.logger.Info("trace details query took: ", "duration", time.Since(queryStartTime), "traceID", traceID)
@@ -901,7 +898,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadataCache(ctx contex
cachedTraceData := new(model.GetWaterfallSpansForTraceWithMetadataCache)
err := r.cacheForTraceDetail.Get(ctx, orgID, strings.Join([]string{"getWaterfallSpansForTraceWithMetadata", traceID}, "-"), cachedTraceData)
if err != nil {
r.logger.Debug("error in retrieving getWaterfallSpansForTraceWithMetadata cache", errorsV2.Attr(err), "traceID", traceID)
r.logger.Debug("error in retrieving getWaterfallSpansForTraceWithMetadata cache", "error", err, "traceID", traceID)
return nil, err
}
@@ -952,7 +949,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
ref := []model.OtelSpanRef{}
err := json.Unmarshal([]byte(item.References), &ref)
if err != nil {
r.logger.Error("getWaterfallSpansForTraceWithMetadata: error unmarshalling references", errorsV2.Attr(err), "traceID", traceID)
r.logger.Error("getWaterfallSpansForTraceWithMetadata: error unmarshalling references", "error", err, "traceID", traceID)
return nil, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "getWaterfallSpansForTraceWithMetadata: error unmarshalling references %s", err.Error())
}
@@ -972,7 +969,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
var eventMap model.Event
err = json.Unmarshal([]byte(event), &eventMap)
if err != nil {
r.logger.Error("Error unmarshalling events", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling events", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "getWaterfallSpansForTraceWithMetadata: error in unmarshalling events %s", err.Error())
}
events = append(events, eventMap)
@@ -1085,7 +1082,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
r.logger.Info("getWaterfallSpansForTraceWithMetadata: processing pre cache", "duration", time.Since(processingBeforeCache), "traceID", traceID)
cacheErr := r.cacheForTraceDetail.Set(ctx, orgID, strings.Join([]string{"getWaterfallSpansForTraceWithMetadata", traceID}, "-"), &traceCache, time.Minute*5)
if cacheErr != nil {
r.logger.Debug("failed to store cache for getWaterfallSpansForTraceWithMetadata", "traceID", traceID, errorsV2.Attr(err))
r.logger.Debug("failed to store cache for getWaterfallSpansForTraceWithMetadata", "traceID", traceID, "error", err)
}
}
@@ -1119,7 +1116,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTraceCache(ctx context.Context,
cachedTraceData := new(model.GetFlamegraphSpansForTraceCache)
err := r.cacheForTraceDetail.Get(ctx, orgID, strings.Join([]string{"getFlamegraphSpansForTrace", traceID}, "-"), cachedTraceData)
if err != nil {
r.logger.Debug("error in retrieving getFlamegraphSpansForTrace cache", errorsV2.Attr(err), "traceID", traceID)
r.logger.Debug("error in retrieving getFlamegraphSpansForTrace cache", "error", err, "traceID", traceID)
return nil, err
}
@@ -1167,7 +1164,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
ref := []model.OtelSpanRef{}
err := json.Unmarshal([]byte(item.References), &ref)
if err != nil {
r.logger.Error("Error unmarshalling references", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling references", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "getFlamegraphSpansForTrace: error in unmarshalling references %s", err.Error())
}
@@ -1176,7 +1173,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
var eventMap model.Event
err = json.Unmarshal([]byte(event), &eventMap)
if err != nil {
r.logger.Error("Error unmarshalling events", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling events", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "getFlamegraphSpansForTrace: error in unmarshalling events %s", err.Error())
}
events = append(events, eventMap)
@@ -1255,7 +1252,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
r.logger.Info("getFlamegraphSpansForTrace: processing pre cache", "duration", time.Since(processingBeforeCache), "traceID", traceID)
cacheErr := r.cacheForTraceDetail.Set(ctx, orgID, strings.Join([]string{"getFlamegraphSpansForTrace", traceID}, "-"), &traceCache, time.Minute*5)
if cacheErr != nil {
r.logger.Debug("failed to store cache for getFlamegraphSpansForTrace", "traceID", traceID, errorsV2.Attr(err))
r.logger.Debug("failed to store cache for getFlamegraphSpansForTrace", "traceID", traceID, "error", err)
}
}
@@ -1315,7 +1312,7 @@ func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *
err := r.db.Select(ctx, &response, query, args...)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query %w", err)
}
@@ -1434,13 +1431,13 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Model(&ttl).
Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to ttl_status table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to ttl_status table", "error", dbErr)
return
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("error in setting cold storage", "error", err)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
@@ -1453,7 +1450,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -1462,7 +1459,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
r.logger.Info("Executing TTL request: ", "request", query)
statusItem, _ := r.checkTTLStatusItem(ctx, orgID, tableName)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting ttl", errorsV2.Attr(err))
r.logger.Error("error while setting ttl", "error", err)
_, dbErr := r.
sqlDB.
BunDB().
@@ -1473,7 +1470,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
return
@@ -1488,7 +1485,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -1572,7 +1569,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Model(&ttl).
Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to ttl_status table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to ttl_status table", "error", dbErr)
return
}
@@ -1590,7 +1587,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("Error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("Error in setting cold storage", "error", err)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
@@ -1603,7 +1600,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -1613,7 +1610,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
r.logger.Error(" ExecutingTTL request: ", "request", req)
statusItem, _ := r.checkTTLStatusItem(ctx, orgID, tableName)
if err := r.db.Exec(ctx, req); err != nil {
r.logger.Error("Error in executing set TTL query", errorsV2.Attr(err))
r.logger.Error("Error in executing set TTL query", "error", err)
_, dbErr := r.
sqlDB.
BunDB().
@@ -1624,7 +1621,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
return
@@ -1639,7 +1636,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}(distributedTableName)
@@ -1665,7 +1662,7 @@ func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool,
r.logger.Debug("_retention_days column not found in logs table", "table", r.logsLocalTableV2)
return false, nil
}
r.logger.Error("Error checking for _retention_days column", errorsV2.Attr(err))
r.logger.Error("Error checking for _retention_days column", "error", err)
return false, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error checking columns")
}
@@ -1845,14 +1842,14 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
// Insert TTL setting record
_, dbErr := r.sqlDB.BunDB().NewInsert().Model(&customTTL).Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to custom_retention_ttl_settings table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to custom_retention_ttl_settings table", "error", dbErr)
return nil, errorsV2.Wrapf(dbErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "error inserting TTL settings")
}
if len(params.ColdStorageVolume) > 0 && coldStorageDuration > 0 {
err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("error in setting cold storage", "error", err)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
}
@@ -1861,7 +1858,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
for i, query := range queries {
r.logger.Debug("Executing custom retention TTL request: ", "request", query, "step", i+1)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting custom retention ttl", errorsV2.Attr(err))
r.logger.Error("error while setting custom retention ttl", "error", err)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
}
@@ -1953,7 +1950,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
hasCustomRetention, err := r.hasCustomRetentionColumn(ctx)
if err != nil {
// If there's an error checking, assume V1 and proceed
r.logger.Warn("Error checking for custom retention column, assuming V1", errorsV2.Attr(err))
r.logger.Warn("Error checking for custom retention column, assuming V1", "error", err)
hasCustomRetention = false
}
@@ -1974,7 +1971,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing get custom ttl query")
}
@@ -1991,7 +1988,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
var ttlConditions []model.CustomRetentionRule
if customTTL.Condition != "" {
if err := json.Unmarshal([]byte(customTTL.Condition), &ttlConditions); err != nil {
r.logger.Error("Error parsing TTL conditions", errorsV2.Attr(err))
r.logger.Error("Error parsing TTL conditions", "error", err)
ttlConditions = []model.CustomRetentionRule{}
}
}
@@ -2044,7 +2041,7 @@ func (r *ClickHouseReader) checkCustomRetentionTTLStatusItem(ctx context.Context
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return ttl, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
}
@@ -2061,7 +2058,7 @@ func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, o
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing custom_retention_ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing custom_retention_ttl_status update sql query", "error", dbErr)
}
}
}
@@ -2244,7 +2241,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Model(&ttl).
Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to ttl_status table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to ttl_status table", "error", dbErr)
return
}
timeColumn := "timestamp_ms"
@@ -2262,7 +2259,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("Error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("Error in setting cold storage", "error", err)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
@@ -2275,7 +2272,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -2285,7 +2282,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
r.logger.Info("Executing TTL request: ", "request", req)
statusItem, _ := r.checkTTLStatusItem(ctx, orgID, tableName)
if err := r.db.Exec(ctx, req); err != nil {
r.logger.Error("error while setting ttl.", errorsV2.Attr(err))
r.logger.Error("error while setting ttl.", "error", err)
_, dbErr := r.
sqlDB.
BunDB().
@@ -2296,7 +2293,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
return
@@ -2311,7 +2308,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -2336,7 +2333,7 @@ func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID stri
Scan(ctx, &limitTransactions)
if err != nil {
r.logger.Error("Error in processing ttl_status delete sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing ttl_status delete sql query", "error", err)
}
_, err = r.
@@ -2347,7 +2344,7 @@ func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID stri
Where("transaction_id NOT IN (?)", bun.In(limitTransactions)).
Exec(ctx)
if err != nil {
r.logger.Error("Error in processing ttl_status delete sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing ttl_status delete sql query", "error", err)
}
}
@@ -2366,7 +2363,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
Limit(1).
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return ttl, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
return ttl, nil
@@ -2413,7 +2410,7 @@ func (r *ClickHouseReader) setColdStorage(ctx context.Context, tableName string,
r.logger.Info("Executing Storage policy request: ", "request", policyReq)
if err := r.db.Exec(ctx, policyReq); err != nil {
r.logger.Error("error while setting storage policy", errorsV2.Attr(err))
r.logger.Error("error while setting storage policy", "error", err)
return &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting storage policy. Err=%v", err)}
}
}
@@ -2430,7 +2427,7 @@ func (r *ClickHouseReader) GetDisks(ctx context.Context) (*[]model.DiskItem, *mo
query := "SELECT name,type FROM system.disks"
if err := r.db.Select(ctx, &diskItems, query); err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting disks. Err=%v", err)}
}
@@ -2490,7 +2487,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
r.logger.Error("error while getting ttl", errorsV2.Attr(err))
r.logger.Error("error while getting ttl", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2508,7 +2505,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
r.logger.Error("error while getting ttl", errorsV2.Attr(err))
r.logger.Error("error while getting ttl", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2526,7 +2523,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
r.logger.Error("error while getting ttl", errorsV2.Attr(err))
r.logger.Error("error while getting ttl", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2659,7 +2656,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
args = append(args, argsSubQuery...)
if errStatus != nil {
r.logger.Error("Error in processing tags", errorsV2.Attr(errStatus))
r.logger.Error("Error in processing tags", "error", errStatus)
return nil, errStatus
}
query = query + " GROUP BY groupID"
@@ -2690,7 +2687,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2725,7 +2722,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
args = append(args, argsSubQuery...)
if errStatus != nil {
r.logger.Error("Error in processing tags", errorsV2.Attr(errStatus))
r.logger.Error("Error in processing tags", "error", errStatus)
return 0, errStatus
}
@@ -2733,7 +2730,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return 0, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2760,7 +2757,7 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2789,7 +2786,7 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2813,12 +2810,12 @@ func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams
}
getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, apiErr = r.getNextErrorID(ctx, queryParams)
if apiErr != nil {
r.logger.Error("Unable to get next error ID due to err: ", errorsV2.Attr(apiErr))
r.logger.Error("Unable to get next error ID due to err: ", "error", apiErr)
return nil, apiErr
}
getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, apiErr = r.getPrevErrorID(ctx, queryParams)
if apiErr != nil {
r.logger.Error("Unable to get prev error ID due to err: ", errorsV2.Attr(apiErr))
r.logger.Error("Unable to get prev error ID due to err: ", "error", apiErr)
return nil, apiErr
}
return &getNextPrevErrorIDsResponse, nil
@@ -2842,7 +2839,7 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
@@ -2863,7 +2860,7 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
@@ -2877,7 +2874,7 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2916,7 +2913,7 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
@@ -2937,7 +2934,7 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
@@ -2951,7 +2948,7 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2979,7 +2976,7 @@ func (r *ClickHouseReader) FetchTemporality(ctx context.Context, orgID valuer.UU
// Batch fetch all metadata at once
metadataMap, apiErr := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
if apiErr != nil {
r.logger.Warn("Failed to fetch updated metrics metadata", errorsV2.Attr(apiErr))
r.logger.Warn("Failed to fetch updated metrics metadata", "error", apiErr)
return nil, apiErr
}
@@ -3328,7 +3325,7 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string)
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, err
}
@@ -3382,7 +3379,7 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, org
rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), normalized)
if err != nil {
r.logger.Error("Error while querying metric names", errorsV2.Attr(err))
r.logger.Error("Error while querying metric names", "error", err)
return nil, fmt.Errorf("error while executing metric name query: %s", err.Error())
}
defer rows.Close()
@@ -3461,7 +3458,7 @@ func (r *ClickHouseReader) GetMeterAggregateAttributes(ctx context.Context, orgI
rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while querying meter names", errorsV2.Attr(err))
r.logger.Error("Error while querying meter names", "error", err)
return nil, fmt.Errorf("error while executing meter name query: %s", err.Error())
}
defer rows.Close()
@@ -3516,7 +3513,7 @@ func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.F
}
rows, err = r.db.Query(ctx, query, req.AggregateAttribute, common.PastDayRoundOff(), normalized, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3556,7 +3553,7 @@ func (r *ClickHouseReader) GetMeterAttributeKeys(ctx context.Context, req *v3.Fi
}
rows, err = r.db.Query(ctx, query, req.AggregateAttribute, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3605,7 +3602,7 @@ func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, names, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff(), normalized)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3635,7 +3632,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
// 1. Fetch metadata from cache/db using unified function
metadataMap, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
if apiError != nil {
r.logger.Error("Error in getting metric cached metadata", errorsV2.Attr(apiError))
r.logger.Error("Error in getting metric cached metadata", "error", apiError)
return nil, fmt.Errorf("error fetching metric metadata: %s", apiError.Err.Error())
}
@@ -3678,7 +3675,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
rows, err := r.db.Query(ctx, query, metricName, unixMilli, serviceName, serviceName)
if err != nil {
r.logger.Error("Error while querying histogram buckets", errorsV2.Attr(err))
r.logger.Error("Error while querying histogram buckets", "error", err)
return nil, fmt.Errorf("error while querying histogram buckets: %s", err.Error())
}
defer rows.Close()
@@ -3690,7 +3687,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
}
le, err := strconv.ParseFloat(leStr, 64)
if err != nil || math.IsInf(le, 0) {
r.logger.Error("Invalid 'le' bucket value", "value", leStr, errorsV2.Attr(err))
r.logger.Error("Invalid 'le' bucket value", "value", leStr, "error", err)
continue
}
leFloat64 = append(leFloat64, le)
@@ -3903,7 +3900,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type from %s.%s WHERE %s and tag_type != 'logfield' limit $2", r.logsDB, r.logsTagAttributeTableV2, where)
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3967,7 +3964,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
}
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4182,7 +4179,7 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi
}
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4252,7 +4249,7 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
slog.Error("unexpected error encountered", errorsV2.Attr(err))
slog.Error("unexpected error encountered", "error", err)
}
for key, val := range metric {
groupBy = append(groupBy, val)
@@ -4448,7 +4445,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
go func() {
err := r.queryProgressTracker.ReportQueryProgress(qid, p)
if err != nil {
r.logger.Error("Couldn't report query progress", "queryId", qid, errorsV2.Attr(err))
r.logger.Error("Couldn't report query progress", "queryId", qid, "error", err)
}
}()
},
@@ -4459,7 +4456,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
rows, err := r.db.Query(ctx, query)
if err != nil {
r.logger.Error("error while reading time series result", errorsV2.Attr(err))
r.logger.Error("error while reading time series result", "error", err)
return nil, errors.New(err.Error())
}
defer rows.Close()
@@ -4501,7 +4498,7 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
})
rows, err := r.db.Query(ctx, query)
if err != nil {
r.logger.Error("error while reading time series result", errorsV2.Attr(err))
r.logger.Error("error while reading time series result", "error", err)
return nil, errors.New(err.Error())
}
@@ -4579,7 +4576,7 @@ func (r *ClickHouseReader) GetMetricsExistenceAndEarliestTime(ctx context.Contex
var count, minFirstReported uint64
err := r.db.QueryRow(ctx, query, clickhouse.Named("metric_names", metricNames)).Scan(&count, &minFirstReported)
if err != nil {
r.logger.Error("error getting host metrics existence and earliest time", errorsV2.Attr(err))
r.logger.Error("error getting host metrics existence and earliest time", "error", err)
return 0, 0, err
}
return count, minFirstReported, nil
@@ -4589,7 +4586,7 @@ func getPersonalisedError(err error) error {
if err == nil {
return nil
}
slog.Error("error while reading result", errorsV2.Attr(err))
slog.Error("error while reading result", "error", err)
if strings.Contains(err.Error(), "code: 307") {
return chErrors.ErrResourceBytesLimitExceeded
}
@@ -4667,7 +4664,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4733,7 +4730,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4849,7 +4846,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
}
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4898,7 +4895,7 @@ func (r *ClickHouseReader) GetSpanAttributeKeysByNames(ctx context.Context, name
rows, err = r.db.Query(ctx, query)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -5059,7 +5056,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
r.logger.Debug("rule state history query", "query", query)
err := r.db.Select(ctx, &history, query)
if err != nil {
r.logger.Error("Error while reading rule state history", errorsV2.Attr(err))
r.logger.Error("Error while reading rule state history", "error", err)
return nil, err
}
@@ -5127,7 +5124,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID(
contributors := []model.RuleStateHistoryContributor{}
err := r.db.Select(ctx, &contributors, query)
if err != nil {
r.logger.Error("Error while reading rule state history", errorsV2.Attr(err))
r.logger.Error("Error while reading rule state history", "error", err)
return nil, err
}
@@ -5428,7 +5425,7 @@ func (r *ClickHouseReader) GetMinAndMaxTimestampForTraceID(ctx context.Context,
err := r.db.QueryRow(ctx, query).Scan(&minTime, &maxTime)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return 0, 0, err
}
@@ -5474,7 +5471,7 @@ func (r *ClickHouseReader) GetAllMetricFilterAttributeKeys(ctx context.Context,
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query, common.PastDayRoundOff(), normalized, fmt.Sprintf("%%%s%%", req.SearchText)) //only showing past day data
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -5520,7 +5517,7 @@ func (r *ClickHouseReader) GetAllMetricFilterAttributeValues(ctx context.Context
rows, err = r.db.Query(valueCtx, query, req.FilterKey, req.FilterKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff(), normalized) //only showing past day data
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -5554,7 +5551,7 @@ func (r *ClickHouseReader) GetAllMetricFilterUnits(ctx context.Context, req *met
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -5585,7 +5582,7 @@ func (r *ClickHouseReader) GetAllMetricFilterTypes(ctx context.Context, req *met
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -5824,7 +5821,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
queryDuration := time.Since(begin)
r.logger.Info("Time taken to execute metrics query to fetch metrics with high time series", "query", metricsQuery, "args", args, "duration", queryDuration)
if err != nil {
r.logger.Error("Error executing metrics query", errorsV2.Attr(err))
r.logger.Error("Error executing metrics query", "error", err)
return &metrics_explorer.SummaryListMetricsResponse{}, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -5835,14 +5832,14 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
for rows.Next() {
var metric metrics_explorer.MetricDetail
if err := rows.Scan(&metric.MetricName, &metric.Description, &metric.MetricType, &metric.MetricUnit, &metric.TimeSeries, &response.Total); err != nil {
r.logger.Error("Error scanning metric row", errorsV2.Attr(err))
r.logger.Error("Error scanning metric row", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
metricNames = append(metricNames, metric.MetricName)
response.Metrics = append(response.Metrics, metric)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over metric rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over metric rows", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
// If no metrics were found, return early.
@@ -5929,7 +5926,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
queryDuration = time.Since(begin)
r.logger.Info("Time taken to execute list summary query", "query", sampleQuery, "args", args, "duration", queryDuration)
if err != nil {
r.logger.Error("Error executing samples query", errorsV2.Attr(err))
r.logger.Error("Error executing samples query", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -5940,20 +5937,20 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
var samples uint64
var metricName string
if err := rows.Scan(&samples, &metricName); err != nil {
r.logger.Error("Error scanning sample row", errorsV2.Attr(err))
r.logger.Error("Error scanning sample row", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
samplesMap[metricName] = samples
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over sample rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over sample rows", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
//get updated metrics data
batch, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
if apiError != nil {
r.logger.Error("Error in getting metrics cached metadata", errorsV2.Attr(apiError))
r.logger.Error("Error in getting metrics cached metadata", "error", apiError)
}
var filteredMetrics []metrics_explorer.MetricDetail
@@ -6045,7 +6042,7 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
duration := time.Since(begin)
r.logger.Info("Time taken to execute time series percentage query", "query", query, "args", args, "duration", duration)
if err != nil {
r.logger.Error("Error executing time series percentage query", errorsV2.Attr(err), "query", query)
r.logger.Error("Error executing time series percentage query", "error", err, "query", query)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6054,14 +6051,14 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
for rows.Next() {
var item metrics_explorer.TreeMapResponseItem
if err := rows.Scan(&item.MetricName, &item.TotalValue, &item.Percentage); err != nil {
r.logger.Error("Error scanning row", errorsV2.Attr(err))
r.logger.Error("Error scanning row", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
treemap = append(treemap, item)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over rows", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -6112,7 +6109,7 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
duration := time.Since(begin)
r.logger.Info("Time taken to execute samples percentage metric name query to reduce search space", "query", metricsQuery, "start", start, "end", end, "duration", duration)
if err != nil {
r.logger.Error("Error executing samples percentage query", errorsV2.Attr(err))
r.logger.Error("Error executing samples percentage query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6123,13 +6120,13 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
var metricName string
var timeSeries uint64
if err := rows.Scan(&metricName, &timeSeries); err != nil {
r.logger.Error("Error scanning metric row", errorsV2.Attr(err))
r.logger.Error("Error scanning metric row", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
metricNames = append(metricNames, metricName)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over metric rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over metric rows", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -6210,7 +6207,7 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
duration = time.Since(begin)
r.logger.Info("Time taken to execute samples percentage query", "query", sampleQuery, "args", args, "duration", duration)
if err != nil {
r.logger.Error("Error executing samples query", errorsV2.Attr(err))
r.logger.Error("Error executing samples query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6220,13 +6217,13 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
for rows.Next() {
var item metrics_explorer.TreeMapResponseItem
if err := rows.Scan(&item.TotalValue, &item.MetricName, &item.Percentage); err != nil {
r.logger.Error("Error scanning row", errorsV2.Attr(err))
r.logger.Error("Error scanning row", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
treemap = append(treemap, item)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over sample rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over sample rows", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -6830,7 +6827,7 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, 0); cacheErr != nil {
r.logger.Error("Failed to store metrics metadata in cache", "metric_name", metadata.MetricName, errorsV2.Attr(cacheErr))
r.logger.Error("Failed to store metrics metadata in cache", "metric_name", metadata.MetricName, "error", cacheErr)
}
cachedMetadata[metadata.MetricName] = metadata
found[metadata.MetricName] = struct{}{}
@@ -6871,7 +6868,7 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, 0); cacheErr != nil {
r.logger.Error("Failed to cache fallback metadata", "metric_name", metadata.MetricName, errorsV2.Attr(cacheErr))
r.logger.Error("Failed to cache fallback metadata", "metric_name", metadata.MetricName, "error", cacheErr)
}
cachedMetadata[metadata.MetricName] = metadata
}
@@ -6903,7 +6900,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc
if err == sql.ErrNoRows {
return &searchSpansResult, nil
}
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query")
}
@@ -6918,7 +6915,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc
query := fmt.Sprintf("SELECT timestamp, duration_nano, span_id, trace_id, has_error, kind, resource_string_service$$name, name, links as references, attributes_string, attributes_number, attributes_bool, resources_string, events, status_message, status_code_string, kind_string FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3", r.TraceDB, r.traceTableName)
err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query")
}
@@ -6930,7 +6927,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc
ref := []model.OtelSpanRef{}
err := json.Unmarshal([]byte(item.References), &ref)
if err != nil {
r.logger.Error("Error unmarshalling references", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling references", "error", err)
return nil, err
}

View File

@@ -25,8 +25,6 @@ import (
"text/template"
"time"
"github.com/prometheus/prometheus/promql"
"github.com/SigNoz/signoz/pkg/alertmanager"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/middleware"
@@ -37,6 +35,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
@@ -253,13 +252,13 @@ func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, erro
// TODO(nitya): remote this in later for multitenancy.
orgs, err := opts.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
if err != nil {
aH.logger.Warn("unexpected error while fetching orgs while initializing base api handler", errors.Attr(err))
aH.logger.Warn("unexpected error while fetching orgs while initializing base api handler", "error", err)
}
// if the first org with the first user is created then the setup is complete.
if len(orgs) == 1 {
count, err := opts.Signoz.Modules.UserGetter.CountByOrgID(context.Background(), orgs[0].ID)
if err != nil {
aH.logger.Warn("unexpected error while fetching user count while initializing base api handler", errors.Attr(err))
aH.logger.Warn("unexpected error while fetching user count while initializing base api handler", "error", err)
}
if count > 0 {
@@ -314,7 +313,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
Data: data,
})
if err != nil {
slog.Error("error marshalling json response", errors.Attr(err))
slog.Error("error marshalling json response", "error", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -346,7 +345,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if n, err := w.Write(b); err != nil {
slog.Error("error writing response", "bytes_written", n, errors.Attr(err))
slog.Error("error writing response", "bytes_written", n, "error", err)
}
}
@@ -358,7 +357,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
Data: data,
})
if err != nil {
slog.Error("error marshalling json response", errors.Attr(err))
slog.Error("error marshalling json response", "error", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -366,7 +365,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
if n, err := w.Write(b); err != nil {
slog.Error("error writing response", "bytes_written", n, errors.Attr(err))
slog.Error("error writing response", "bytes_written", n, "error", err)
}
}
@@ -938,14 +937,14 @@ func (aH *APIHandler) metaForLinks(ctx context.Context, rule *ruletypes.Gettable
}
keys = model.GetLogFieldsV3(ctx, params, logFields)
} else {
aH.logger.ErrorContext(ctx, "failed to get log fields using empty keys", errors.Attr(apiErr))
aH.logger.ErrorContext(ctx, "failed to get log fields using empty keys", "error", apiErr)
}
} else if rule.AlertType == ruletypes.AlertTypeTraces {
traceFields, err := aH.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(rule.PostableRule.RuleCondition.CompositeQuery))
if err == nil {
keys = traceFields
} else {
aH.logger.ErrorContext(ctx, "failed to get span attributes using empty keys", errors.Attr(err))
aH.logger.ErrorContext(ctx, "failed to get span attributes using empty keys", "error", err)
}
}
@@ -1278,14 +1277,14 @@ func (aH *APIHandler) List(rw http.ResponseWriter, r *http.Request) {
installedIntegrationDashboards, apiErr := aH.IntegrationsController.GetDashboardsForInstalledIntegrations(ctx, orgID)
if apiErr != nil {
aH.logger.ErrorContext(ctx, "failed to get dashboards for installed integrations", errors.Attr(apiErr))
aH.logger.ErrorContext(ctx, "failed to get dashboards for installed integrations", "error", apiErr)
} else {
dashboards = append(dashboards, installedIntegrationDashboards...)
}
cloudIntegrationDashboards, apiErr := aH.CloudIntegrationsController.AvailableDashboards(ctx, orgID)
if apiErr != nil {
aH.logger.ErrorContext(ctx, "failed to get dashboards for cloud integrations", errors.Attr(apiErr))
aH.logger.ErrorContext(ctx, "failed to get dashboards for cloud integrations", "error", apiErr)
} else {
dashboards = append(dashboards, cloudIntegrationDashboards...)
}
@@ -1327,7 +1326,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for test rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for test rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1379,7 +1378,7 @@ func (aH *APIHandler) patchRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for patch rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for patch rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1409,7 +1408,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for edit rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for edit rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1434,7 +1433,7 @@ func (aH *APIHandler) createRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for create rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for create rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1480,7 +1479,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
}
if res.Err != nil {
aH.logger.ErrorContext(r.Context(), "error in query range metrics", errors.Attr(res.Err))
aH.logger.ErrorContext(r.Context(), "error in query range metrics", "error", res.Err)
}
if res.Err != nil {
@@ -1535,7 +1534,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
}
if res.Err != nil {
aH.logger.ErrorContext(r.Context(), "error in query range metrics", errors.Attr(res.Err))
aH.logger.ErrorContext(r.Context(), "error in query range metrics", "error", res.Err)
}
if res.Err != nil {
@@ -1638,7 +1637,7 @@ func (aH *APIHandler) getServicesTopLevelOps(w http.ResponseWriter, r *http.Requ
var params topLevelOpsParams
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for get top operations", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for get top operations", "error", err)
}
if params.Service != "" {
@@ -2060,7 +2059,7 @@ func (aH *APIHandler) HandleError(w http.ResponseWriter, err error, statusCode i
return false
}
if statusCode == http.StatusInternalServerError {
aH.logger.Error("internal server error in http handler", errors.Attr(err))
aH.logger.Error("internal server error in http handler", "error", err)
}
structuredResp := structuredResponse{
Errors: []structuredError{
@@ -2154,7 +2153,7 @@ func (aH *APIHandler) onboardProducers(
) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2162,7 +2161,7 @@ func (aH *APIHandler) onboardProducers(
chq, err := kafka.BuildClickHouseQuery(messagingQueue, kafka.KafkaQueue, "onboard_producers")
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard producers", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard producers", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2256,7 +2255,7 @@ func (aH *APIHandler) onboardConsumers(
) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2264,7 +2263,7 @@ func (aH *APIHandler) onboardConsumers(
chq, err := kafka.BuildClickHouseQuery(messagingQueue, kafka.KafkaQueue, "onboard_consumers")
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard consumers", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard consumers", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2403,7 +2402,7 @@ func (aH *APIHandler) onboardKafka(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2411,7 +2410,7 @@ func (aH *APIHandler) onboardKafka(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err := kafka.BuildBuilderQueriesKafkaOnboarding(messagingQueue)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build kafka onboarding queries", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build kafka onboarding queries", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2513,19 +2512,19 @@ func (aH *APIHandler) getNetworkData(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
queryRangeParams, err := kafka.BuildQRParamsWithCache(messagingQueue, "throughput", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for throughput", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for throughput", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2564,12 +2563,12 @@ func (aH *APIHandler) getNetworkData(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err = kafka.BuildQRParamsWithCache(messagingQueue, "fetch-latency", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for fetch latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for fetch latency", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for fetch latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for fetch latency", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2624,7 +2623,7 @@ func (aH *APIHandler) getProducerData(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2638,13 +2637,13 @@ func (aH *APIHandler) getProducerData(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2681,7 +2680,7 @@ func (aH *APIHandler) getConsumerData(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2691,13 +2690,13 @@ func (aH *APIHandler) getConsumerData(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2739,7 +2738,7 @@ func (aH *APIHandler) getPartitionOverviewLatencyData(w http.ResponseWriter, r *
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2749,13 +2748,13 @@ func (aH *APIHandler) getPartitionOverviewLatencyData(w http.ResponseWriter, r *
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer-topic-throughput", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer topic throughput", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer topic throughput", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer topic throughput", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer topic throughput", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2797,7 +2796,7 @@ func (aH *APIHandler) getConsumerPartitionLatencyData(w http.ResponseWriter, r *
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2807,13 +2806,13 @@ func (aH *APIHandler) getConsumerPartitionLatencyData(w http.ResponseWriter, r *
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer_partition_latency", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer partition latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer partition latency", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer partition latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer partition latency", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2857,7 +2856,7 @@ func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *ht
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2868,13 +2867,13 @@ func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *ht
producerQueryRangeParams, err := kafka.BuildQRParamsWithCache(messagingQueue, "producer-throughput-overview", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(producerQueryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2910,12 +2909,12 @@ func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *ht
queryRangeParams, err := kafka.BuildQRParamsWithCache(messagingQueue, "producer-throughput-overview-byte-rate", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput byte rate", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput byte rate", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput byte rate", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput byte rate", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2973,7 +2972,7 @@ func (aH *APIHandler) getProducerThroughputDetails(w http.ResponseWriter, r *htt
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2983,13 +2982,13 @@ func (aH *APIHandler) getProducerThroughputDetails(w http.ResponseWriter, r *htt
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer-throughput-details", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -3031,7 +3030,7 @@ func (aH *APIHandler) getConsumerThroughputOverview(w http.ResponseWriter, r *ht
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -3041,13 +3040,13 @@ func (aH *APIHandler) getConsumerThroughputOverview(w http.ResponseWriter, r *ht
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer-throughput-overview", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -3089,7 +3088,7 @@ func (aH *APIHandler) getConsumerThroughputDetails(w http.ResponseWriter, r *htt
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -3099,13 +3098,13 @@ func (aH *APIHandler) getConsumerThroughputDetails(w http.ResponseWriter, r *htt
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer-throughput-details", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -3150,7 +3149,7 @@ func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Req
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -3160,7 +3159,7 @@ func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Req
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer-consumer-eval", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer consumer eval", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer consumer eval", "error", err)
RespondError(w, &model.ApiError{
Typ: model.ErrorBadData,
Err: err,
@@ -3169,7 +3168,7 @@ func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Req
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer consumer eval", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer consumer eval", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -4455,7 +4454,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
aH.logger.ErrorContext(r.Context(), "error parsing query range params", errors.Attr(apiErrorObj.Err))
aH.logger.ErrorContext(r.Context(), "error parsing query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -4535,7 +4534,7 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
if apiErr != nil {
aH.logger.ErrorContext(ctx, "failed to report query start for progress tracking",
"query_id", queryIdHeader, errors.Attr(apiErr),
"query_id", queryIdHeader, "error", apiErr,
)
} else {
@@ -4710,7 +4709,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", errors.Attr(apiErrorObj.Err))
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -4718,7 +4717,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", errors.Attr(temporalityErr))
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", "error", temporalityErr)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -4763,7 +4762,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
if apiErr != nil {
// Shouldn't happen unless query progress requested after query finished
aH.logger.WarnContext(r.Context(), "failed to subscribe to query progress",
"query_id", queryId, errors.Attr(apiErr),
"query_id", queryId, "error", apiErr,
)
return
}
@@ -4773,7 +4772,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
msg, err := json.Marshal(queryProgress)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to serialize progress message",
"query_id", queryId, "progress", queryProgress, errors.Attr(err),
"query_id", queryId, "progress", queryProgress, "error", err,
)
continue
}
@@ -4781,7 +4780,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
err = c.WriteMessage(websocket.TextMessage, msg)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to write progress message to websocket",
"query_id", queryId, "msg", string(msg), errors.Attr(err),
"query_id", queryId, "msg", string(msg), "error", err,
)
break
@@ -4929,7 +4928,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", errors.Attr(apiErrorObj.Err))
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -4938,7 +4937,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", errors.Attr(temporalityErr))
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", "error", temporalityErr)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -4987,7 +4986,7 @@ func (aH *APIHandler) getQueueOverview(w http.ResponseWriter, r *http.Request) {
queueListRequest, apiErr := ParseQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -4995,7 +4994,7 @@ func (aH *APIHandler) getQueueOverview(w http.ResponseWriter, r *http.Request) {
chq, err := queues2.BuildOverviewQuery(queueListRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build queue overview query", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build queue overview query", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: fmt.Errorf("error building clickhouse query: %v", err)}, nil)
return
}
@@ -5026,7 +5025,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
// Parse the request body to get third-party query parameters
thirdPartyQueryRequest, apiErr := ParseRequestBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse request body", errors.Attr(apiErr))
aH.logger.ErrorContext(r.Context(), "failed to parse request body", "error", apiErr)
render.Error(w, errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, apiErr.Error()))
return
}
@@ -5034,7 +5033,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
// Build the v5 query range request for domain listing
queryRangeRequest, err := thirdpartyapi.BuildDomainList(thirdPartyQueryRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build domain list query", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build domain list query", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return
@@ -5047,7 +5046,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
// Execute the query using the v5 querier
result, err := aH.Signoz.Querier.QueryRange(ctx, orgID, queryRangeRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "query execution failed", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "query execution failed", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return
@@ -5086,7 +5085,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
// Parse the request body to get third-party query parameters
thirdPartyQueryRequest, apiErr := ParseRequestBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse request body", errors.Attr(apiErr))
aH.logger.ErrorContext(r.Context(), "failed to parse request body", "error", apiErr)
render.Error(w, errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, apiErr.Error()))
return
}
@@ -5094,7 +5093,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
// Build the v5 query range request for domain info
queryRangeRequest, err := thirdpartyapi.BuildDomainInfo(thirdPartyQueryRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build domain info query", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build domain info query", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return
@@ -5107,7 +5106,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
// Execute the query using the v5 querier
result, err := aH.Signoz.Querier.QueryRange(ctx, orgID, queryRangeRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "query execution failed", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "query execution failed", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return

View File

@@ -7,8 +7,6 @@ import (
"slices"
"strings"
"github.com/google/uuid"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -21,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"log/slog"
)
@@ -176,7 +175,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
if version >= 0 {
savedPipelines, err := ic.getPipelinesByVersion(ctx, orgID.String(), version)
if err != nil {
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, errors.Attr(err))
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, "error", err)
return nil, err
}
result = savedPipelines
@@ -228,7 +227,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
) (*PipelinesResponse, error) {
pipelines, err := ic.getEffectivePipelinesByVersion(ctx, orgId, version)
if err != nil {
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, errors.Attr(err))
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, "error", err)
return nil, err
}
@@ -236,7 +235,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
if version >= 0 {
cv, err := agentConf.GetConfigVersion(ctx, orgId, opamptypes.ElementTypeLogPipelines, version)
if err != nil {
slog.ErrorContext(ctx, "failed to get config for version", "version", version, errors.Attr(err))
slog.ErrorContext(ctx, "failed to get config for version", "version", version, "error", err)
return nil, err
}
configVersion = cv

View File

@@ -81,7 +81,7 @@ func (r *Repo) insertPipeline(
Model(&insertRow.StoreablePipeline).
Exec(ctx)
if err != nil {
slog.ErrorContext(ctx, "error in inserting pipeline data", errors.Attr(err))
slog.ErrorContext(ctx, "error in inserting pipeline data", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to insert pipeline")
}
@@ -137,7 +137,7 @@ func (r *Repo) GetPipeline(
Where("org_id = ?", orgID).
Scan(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to get ingestion pipeline from db", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get ingestion pipeline from db", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to get ingestion pipeline from db")
}
@@ -150,11 +150,11 @@ func (r *Repo) GetPipeline(
gettablePipeline := pipelinetypes.GettablePipeline{}
gettablePipeline.StoreablePipeline = storablePipelines[0]
if err := gettablePipeline.ParseRawConfig(); err != nil {
slog.ErrorContext(ctx, "invalid pipeline config found", "id", id, errors.Attr(err))
slog.ErrorContext(ctx, "invalid pipeline config found", "id", id, "error", err)
return nil, err
}
if err := gettablePipeline.ParseFilter(); err != nil {
slog.ErrorContext(ctx, "invalid pipeline filter found", "id", id, errors.Attr(err))
slog.ErrorContext(ctx, "invalid pipeline filter found", "id", id, "error", err)
return nil, err
}
return &gettablePipeline, nil

View File

@@ -5,16 +5,11 @@ import (
"encoding/json"
"errors"
"sort"
"strings"
"time"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"log/slog"
"golang.org/x/sync/errgroup"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/model"
@@ -23,6 +18,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/sync/errgroup"
)
type SummaryService struct {
@@ -177,14 +173,14 @@ func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, orgID val
if data != nil {
jsonData, err := json.Marshal(data)
if err != nil {
slog.Error("error marshalling data", signozerrors.Attr(err))
slog.Error("error marshalling data", "error", err)
return &model.ApiError{Typ: "MarshallingErr", Err: err}
}
var dashboards map[string][]metrics_explorer.Dashboard
err = json.Unmarshal(jsonData, &dashboards)
if err != nil {
slog.Error("error unmarshalling data", signozerrors.Attr(err))
slog.Error("error unmarshalling data", "error", err)
return &model.ApiError{Typ: "UnMarshallingErr", Err: err}
}
if _, ok := dashboards[metricName]; ok {
@@ -354,12 +350,12 @@ func (receiver *SummaryService) GetRelatedMetrics(ctx context.Context, params *m
if names != nil {
jsonData, err := json.Marshal(names)
if err != nil {
slog.Error("error marshalling dashboard data", signozerrors.Attr(err))
slog.Error("error marshalling dashboard data", "error", err)
return &model.ApiError{Typ: "MarshallingErr", Err: err}
}
err = json.Unmarshal(jsonData, &dashboardsRelatedData)
if err != nil {
slog.Error("error unmarshalling dashboard data", signozerrors.Attr(err))
slog.Error("error unmarshalling dashboard data", "error", err)
return &model.ApiError{Typ: "UnMarshallingErr", Err: err}
}
}

View File

@@ -5,13 +5,12 @@ import (
"crypto/sha256"
"log/slog"
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
"go.opentelemetry.io/collector/confmap"
"github.com/SigNoz/signoz/pkg/errors"
model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig"
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
"go.opentelemetry.io/collector/confmap"
)
var (
@@ -54,7 +53,7 @@ func UpsertControlProcessors(ctx context.Context, signal string,
for _, agent := range agents {
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
if err != nil {
slog.Error("failed to push ingestion rules config to agent", "agent_id", agent.AgentID, errors.Attr(err))
slog.Error("failed to push ingestion rules config to agent", "agent_id", agent.AgentID, "error", err)
continue
}
@@ -83,7 +82,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
// add ingestion control spec
err = makeIngestionControlSpec(agentConf, Signal(signal), processors)
if err != nil {
slog.Error("failed to prepare ingestion control processors for agent", "agent_id", agent.AgentID, errors.Attr(err))
slog.Error("failed to prepare ingestion control processors for agent", "agent_id", agent.AgentID, "error", err)
return confHash, err
}
@@ -134,7 +133,7 @@ func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors
// merge tracesPipelinePlan with current pipeline
mergedPipeline, err := buildPipeline(signal, currentPipeline)
if err != nil {
slog.Error("failed to build pipeline", "signal", string(signal), errors.Attr(err))
slog.Error("failed to build pipeline", "signal", string(signal), "error", err)
return err
}

View File

@@ -8,12 +8,10 @@ import (
"sync"
"time"
"google.golang.org/protobuf/proto"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"google.golang.org/protobuf/proto"
"github.com/open-telemetry/opamp-go/protobufs"
opampTypes "github.com/open-telemetry/opamp-go/server/types"
@@ -86,7 +84,7 @@ func (agent *Agent) KeepOnlyLast50Agents(ctx context.Context) {
Limit(50)).
Exec(ctx)
if err != nil {
agent.logger.Error("failed to delete old agents", errors.Attr(err))
agent.logger.Error("failed to delete old agents", "error", err)
}
}
@@ -315,7 +313,7 @@ func (agent *Agent) processStatusUpdate(
func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool {
recommendedConfig, confId, err := configProvider.RecommendAgentConfig(agent.OrgID, []byte(agent.Config))
if err != nil {
agent.logger.Error("could not generate config recommendation for agent", "agent_id", agent.AgentID, errors.Attr(err))
agent.logger.Error("could not generate config recommendation for agent", "agent_id", agent.AgentID, "error", err)
return false
}

View File

@@ -6,14 +6,12 @@ import (
"net/http"
"time"
"github.com/open-telemetry/opamp-go/protobufs"
"github.com/open-telemetry/opamp-go/server"
"github.com/open-telemetry/opamp-go/server/types"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/instrumentation"
model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/open-telemetry/opamp-go/protobufs"
"github.com/open-telemetry/opamp-go/server"
"github.com/open-telemetry/opamp-go/server/types"
)
var opAmpServer *Server
@@ -74,7 +72,7 @@ func (srv *Server) Start(listener string) error {
err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider)
if err != nil {
srv.logger.Error(
"could not roll out latest config recommendation to connected agents", errors.Attr(err),
"could not roll out latest config recommendation to connected agents", "error", err,
)
}
})
@@ -117,7 +115,7 @@ func (srv *Server) OnMessage(ctx context.Context, conn types.Connection, msg *pr
// agents sends the effective config when we processStatusUpdate.
agent, created, err := srv.agents.FindOrCreateAgent(agentID.String(), conn, orgID)
if err != nil {
srv.logger.Error("failed to find or create agent", "agent_id", agentID.String(), errors.Attr(err))
srv.logger.Error("failed to find or create agent", "agent_id", agentID.String(), "error", err)
// Return error response according to OpAMP protocol
return &protobufs.ServerToAgent{

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"sort"
@@ -15,16 +14,12 @@ import (
"text/template"
"time"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/thirdpartyapitypes"
"log/slog"
"github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/kafka"
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
"log/slog"
"github.com/gorilla/mux"
promModel "github.com/prometheus/common/model"
@@ -746,7 +741,7 @@ func chTransformQuery(query string, variables map[string]interface{}) {
transformer := chVariables.NewQueryTransformer(query, varsForTransform)
transformedQuery, err := transformer.Transform()
if err != nil {
slog.Warn("failed to transform clickhouse query", "query", query, signozerrors.Attr(err))
slog.Warn("failed to transform clickhouse query", "query", query, "error", err)
}
slog.Info("transformed clickhouse query", "transformed_query", transformedQuery, "original_query", query)
}

View File

@@ -3,12 +3,10 @@ package v2
import (
"context"
"fmt"
"github.com/prometheus/prometheus/promql/parser"
"strings"
"sync"
"github.com/prometheus/prometheus/promql/parser"
"github.com/SigNoz/signoz/pkg/errors"
logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
metricsV4 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4"
@@ -287,7 +285,7 @@ func (q *querier) ValidateMetricNames(ctx context.Context, query *v3.CompositeQu
for _, query := range query.PromQueries {
expr, err := parser.ParseExpr(query.Query)
if err != nil {
q.logger.DebugContext(ctx, "error parsing promql expression", "query", query.Query, errors.Attr(err))
q.logger.DebugContext(ctx, "error parsing promql expression", "query", query.Query, "error", err)
continue
}
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
@@ -303,7 +301,7 @@ func (q *querier) ValidateMetricNames(ctx context.Context, query *v3.CompositeQu
}
metrics, err := q.reader.GetNormalizedStatus(ctx, orgID, metricNames)
if err != nil {
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", errors.Attr(err))
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", "error", err)
return
}
for metricName, metricPresent := range metrics {
@@ -321,7 +319,7 @@ func (q *querier) ValidateMetricNames(ctx context.Context, query *v3.CompositeQu
}
metrics, err := q.reader.GetNormalizedStatus(ctx, orgID, metricNames)
if err != nil {
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", errors.Attr(err))
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", "error", err)
return
}
for metricName, metricPresent := range metrics {

View File

@@ -9,7 +9,6 @@ import (
"slices"
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
@@ -17,9 +16,6 @@ import (
"github.com/gorilla/handlers"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/http/middleware"
@@ -39,16 +35,16 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/web"
"log/slog"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
"github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"log/slog"
)
// Server runs HTTP, Mux and a grpc server
@@ -190,7 +186,6 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {
func (s *Server) createPublicServer(api *APIHandler, web web.Web) (*http.Server, error) {
r := NewRouter()
r.Use(middleware.NewRecovery(s.signoz.Instrumentation.Logger()).Wrap)
r.Use(otelmux.Middleware(
"apiserver",
otelmux.WithMeterProvider(s.signoz.Instrumentation.MeterProvider()),
@@ -199,6 +194,7 @@ func (s *Server) createPublicServer(api *APIHandler, web web.Web) (*http.Server,
otelmux.WithFilter(func(r *http.Request) bool {
return !slices.Contains([]string{"/api/v1/health"}, r.URL.Path)
}),
otelmux.WithPublicEndpoint(),
))
r.Use(middleware.NewIdentN(s.signoz.IdentNResolver, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
@@ -289,7 +285,7 @@ func (s *Server) Start(ctx context.Context) error {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
slog.Error("Could not start HTTP server", errors.Attr(err))
slog.Error("Could not start HTTP server", "error", err)
}
s.unavailableChannel <- healthcheck.Unavailable
}()
@@ -299,7 +295,7 @@ func (s *Server) Start(ctx context.Context) error {
err = http.ListenAndServe(constants.DebugHttpPort, nil)
if err != nil {
slog.Error("Could not start pprof server", errors.Attr(err))
slog.Error("Could not start pprof server", "error", err)
}
}()
@@ -307,7 +303,7 @@ func (s *Server) Start(ctx context.Context) error {
slog.Info("Starting OpAmp Websocket server", "addr", constants.OpAmpWsEndpoint)
err := s.opampServer.Start(constants.OpAmpWsEndpoint)
if err != nil {
slog.Error("opamp ws server failed to start", errors.Attr(err))
slog.Error("opamp ws server failed to start", "error", err)
s.unavailableChannel <- healthcheck.Unavailable
}
}()

View File

@@ -5,7 +5,6 @@ import (
"io"
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/authtypes"
@@ -24,13 +23,13 @@ func (aH *APIHandler) FilterKeysSuggestion(w http.ResponseWriter, r *http.Reques
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiError := explorer.ParseFilterKeySuggestions(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing summary filter keys request", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing summary filter keys request", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
keys, apiError := aH.SummaryService.FilterKeys(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting filter keys", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting filter keys", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -54,14 +53,14 @@ func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Requ
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiError := explorer.ParseFilterValueSuggestions(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing summary filter values request", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing summary filter values request", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
values, apiError := aH.SummaryService.FilterValues(ctx, orgID, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting filter values", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting filter values", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -84,7 +83,7 @@ func (aH *APIHandler) GetMetricsDetails(w http.ResponseWriter, r *http.Request)
metricName := mux.Vars(r)["metric_name"]
metricsDetail, apiError := aH.SummaryService.GetMetricsSummary(ctx, orgID, metricName)
if apiError != nil {
slog.ErrorContext(ctx, "error getting metrics summary", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting metrics summary", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -108,14 +107,14 @@ func (aH *APIHandler) ListMetrics(w http.ResponseWriter, r *http.Request) {
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiErr := explorer.ParseSummaryListMetricsParams(r)
if apiErr != nil {
slog.ErrorContext(ctx, "error parsing metric list metric summary api request", errors.Attr(apiErr.Err))
slog.ErrorContext(ctx, "error parsing metric list metric summary api request", "error", apiErr.Err)
RespondError(w, model.BadRequest(apiErr), nil)
return
}
slmr, apiErr := aH.SummaryService.ListMetricsWithSummary(ctx, orgID, params)
if apiErr != nil {
slog.ErrorContext(ctx, "error in getting list metrics summary", errors.Attr(apiErr.Err))
slog.ErrorContext(ctx, "error in getting list metrics summary", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -128,13 +127,13 @@ func (aH *APIHandler) GetTreeMap(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
params, apiError := explorer.ParseTreeMapMetricsParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing tree map metric params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing tree map metric params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
result, apiError := aH.SummaryService.GetMetricsTreemap(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting tree map data", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting tree map data", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -148,13 +147,13 @@ func (aH *APIHandler) GetRelatedMetrics(w http.ResponseWriter, r *http.Request)
ctx := r.Context()
params, apiError := explorer.ParseRelatedMetricsParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing related metric params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing related metric params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
result, apiError := aH.SummaryService.GetRelatedMetrics(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting related metrics", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting related metrics", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -168,13 +167,13 @@ func (aH *APIHandler) GetInspectMetricsData(w http.ResponseWriter, r *http.Reque
ctx := r.Context()
params, apiError := explorer.ParseInspectMetricsParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing inspect metric params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing inspect metric params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
result, apiError := aH.SummaryService.GetInspectMetrics(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting inspect metrics data", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting inspect metrics data", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -199,13 +198,13 @@ func (aH *APIHandler) UpdateMetricsMetadata(w http.ResponseWriter, r *http.Reque
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiError := explorer.ParseUpdateMetricsMetadataParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing update metrics metadata params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing update metrics metadata params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
apiError = aH.SummaryService.UpdateMetricsMetadata(ctx, orgID, params)
if apiError != nil {
slog.ErrorContext(ctx, "error updating metrics metadata", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error updating metrics metadata", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}

View File

@@ -5,7 +5,6 @@ import (
"log/slog"
"strconv"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
@@ -54,7 +53,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
break
}
if err != nil {
slog.Error("error during breadth first search", signozerrors.Attr(err))
slog.Error("error during breadth first search", "error", err)
return nil, err
}
}

View File

@@ -4,7 +4,6 @@ import (
"log/slog"
"strconv"
"github.com/SigNoz/signoz/pkg/errors"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils"
)
@@ -69,7 +68,7 @@ func TraceIdFilterUsedWithEqual(params *v3.QueryRangeParamsV3) (bool, []string)
val := item.Value
val, err = utils.ValidateAndCastValue(val, item.Key.DataType)
if err != nil {
slog.Error("invalid value for key", "key", item.Key.Key, errors.Attr(err))
slog.Error("invalid value for key", "key", item.Key.Key, "error", err)
return false, []string{}
}
if val != nil {

View File

@@ -11,10 +11,8 @@ import (
"log/slog"
"github.com/pkg/errors"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
@@ -940,7 +938,7 @@ func (b *BuilderQuery) SetShiftByFromFunc() {
} else if shift, ok := function.Args[0].(string); ok {
shiftBy, err := strconv.ParseFloat(shift, 64)
if err != nil {
slog.Error("failed to parse time shift by", "shift", shift, signozerrors.Attr(err))
slog.Error("failed to parse time shift by", "shift", shift, "error", err)
}
timeShiftBy = int64(shiftBy)
}

View File

@@ -4,8 +4,6 @@ import (
"log/slog"
"github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)
@@ -58,12 +56,12 @@ func PostProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam
expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, EvalFuncs())
// This shouldn't happen here, because it should have been caught earlier in validation
if err != nil {
slog.Error("error in expression", errors.Attr(err))
slog.Error("error in expression", "error", err)
return nil, err
}
formulaResult, err := processResults(result, expression, canDefaultZero)
if err != nil {
slog.Error("error in expression", errors.Attr(err))
slog.Error("error in expression", "error", err)
return nil, err
}
formulaResult.QueryName = query.QueryName

View File

@@ -7,11 +7,10 @@ import (
"sort"
"time"
"log/slog"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"log/slog"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/types/cachetypes"
@@ -293,7 +292,7 @@ func (q *queryCache) storeMergedData(orgID valuer.UUID, cacheKey string, mergedD
cacheableSeriesData := CacheableSeriesData{Series: mergedData}
err := q.cache.Set(context.TODO(), orgID, cacheKey, &cacheableSeriesData, 0)
if err != nil {
slog.Error("error storing merged data", errors.Attr(err))
slog.Error("error storing merged data", "error", err)
}
}

View File

@@ -369,7 +369,7 @@ func (r *BaseRule) SendAlerts(ctx context.Context, ts time.Time, resendDelay tim
Limit(1).
Scan(ctx, &orgID)
if err != nil {
r.logger.ErrorContext(ctx, "failed to get org ids", errors.Attr(err))
r.logger.ErrorContext(ctx, "failed to get org ids", "error", err)
return
}
@@ -485,7 +485,7 @@ func (r *BaseRule) RecordRuleStateHistory(ctx context.Context, prevState, curren
}
err := r.reader.AddRuleStateHistory(ctx, entries)
if err != nil {
r.logger.ErrorContext(ctx, "error while inserting rule state history", errors.Attr(err), "itemsToAdd", itemsToAdd)
r.logger.ErrorContext(ctx, "error while inserting rule state history", "error", err, "itemsToAdd", itemsToAdd)
}
}
r.handledRestart = true

View File

@@ -240,7 +240,7 @@ func NewManager(o *ManagerOptions) (*Manager, error) {
func (m *Manager) Start(ctx context.Context) {
if err := m.initiate(ctx); err != nil {
m.logger.ErrorContext(ctx, "failed to initialize alerting rules manager", errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to initialize alerting rules manager", "error", err)
}
m.run(ctx)
}
@@ -298,7 +298,7 @@ func (m *Manager) initiate(ctx context.Context) error {
if !parsedRule.Disabled {
err := m.addTask(ctx, org.ID, &parsedRule, taskName)
if err != nil {
m.logger.ErrorContext(ctx, "failed to load the rule definition", "name", taskName, errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to load the rule definition", "name", taskName, "error", err)
}
}
}
@@ -419,7 +419,7 @@ func (m *Manager) editTask(_ context.Context, orgID valuer.UUID, rule *ruletypes
})
if err != nil {
m.logger.Error("loading tasks failed", errors.Attr(err))
m.logger.Error("loading tasks failed", "error", err)
return errors.NewInvalidInputf(errors.CodeInvalidInput, "error preparing rule with given parameters, previous rule set restored")
}
@@ -455,7 +455,7 @@ func (m *Manager) editTask(_ context.Context, orgID valuer.UUID, rule *ruletypes
func (m *Manager) DeleteRule(ctx context.Context, idStr string) error {
id, err := valuer.NewUUID(idStr)
if err != nil {
m.logger.Error("delete rule received a rule id in invalid format, must be a valid uuid-v7", "id", idStr, errors.Attr(err))
m.logger.Error("delete rule received a rule id in invalid format, must be a valid uuid-v7", "id", idStr, "error", err)
return fmt.Errorf("delete rule received an rule id in invalid format, must be a valid uuid-v7")
}
@@ -628,7 +628,7 @@ func (m *Manager) addTask(_ context.Context, orgID valuer.UUID, rule *ruletypes.
})
if err != nil {
m.logger.Error("creating rule task failed", "name", taskName, errors.Attr(err))
m.logger.Error("creating rule task failed", "name", taskName, "error", err)
return errors.NewInvalidInputf(errors.CodeInvalidInput, "error loading rules, previous rule set restored")
}
@@ -784,7 +784,7 @@ func (m *Manager) prepareTestNotifyFunc() NotifyFunc {
}
err := m.alertmanager.TestAlert(ctx, orgID, ruleID, receiverMap)
if err != nil {
m.logger.ErrorContext(ctx, "failed to send test notification", errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to send test notification", "error", err)
return
}
}
@@ -819,7 +819,7 @@ func (m *Manager) ListRuleStates(ctx context.Context) (*ruletypes.GettableRules,
ruleResponse := ruletypes.GettableRule{}
err = json.Unmarshal([]byte(s.Data), &ruleResponse)
if err != nil {
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", s.ID.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", s.ID.StringValue(), "error", err)
continue
}
@@ -850,7 +850,7 @@ func (m *Manager) GetRule(ctx context.Context, id valuer.UUID) (*ruletypes.Getta
r := ruletypes.GettableRule{}
err = json.Unmarshal([]byte(s.Data), &r)
if err != nil {
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", s.ID.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", s.ID.StringValue(), "error", err)
return nil, err
}
r.Id = id.StringValue()
@@ -919,30 +919,30 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, id valuer.UUID)
// retrieve rule from DB
storedJSON, err := m.ruleStore.GetStoredRule(ctx, id)
if err != nil {
m.logger.ErrorContext(ctx, "failed to get stored rule with given id", "id", id.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to get stored rule with given id", "id", id.StringValue(), "error", err)
return nil, err
}
storedRule := ruletypes.PostableRule{}
if err := json.Unmarshal([]byte(storedJSON.Data), &storedRule); err != nil {
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", id.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", id.StringValue(), "error", err)
return nil, err
}
if err := json.Unmarshal([]byte(ruleStr), &storedRule); err != nil {
m.logger.ErrorContext(ctx, "failed to unmarshal patched rule with given id", "id", id.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to unmarshal patched rule with given id", "id", id.StringValue(), "error", err)
return nil, err
}
// deploy or un-deploy task according to patched (new) rule state
if err := m.syncRuleStateWithTask(ctx, orgID, taskName, &storedRule); err != nil {
m.logger.ErrorContext(ctx, "failed to sync stored rule state with the task", "task_name", taskName, errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to sync stored rule state with the task", "task_name", taskName, "error", err)
return nil, err
}
newStoredJson, err := json.Marshal(&storedRule)
if err != nil {
m.logger.ErrorContext(ctx, "failed to marshal new stored rule with given id", "id", id.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to marshal new stored rule with given id", "id", id.StringValue(), "error", err)
return nil, err
}
@@ -954,7 +954,7 @@ func (m *Manager) PatchRule(ctx context.Context, ruleStr string, id valuer.UUID)
err = m.ruleStore.EditRule(ctx, storedJSON, func(ctx context.Context) error { return nil })
if err != nil {
if err := m.syncRuleStateWithTask(ctx, orgID, taskName, &storedRule); err != nil {
m.logger.ErrorContext(ctx, "failed to restore rule after patch failure", "task_name", taskName, errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to restore rule after patch failure", "task_name", taskName, "error", err)
}
return nil, err
}
@@ -1022,7 +1022,7 @@ func (m *Manager) GetAlertDetailsForMetricNames(ctx context.Context, metricNames
result := make(map[string][]ruletypes.GettableRule)
rules, err := m.ruleStore.GetStoredRules(ctx, claims.OrgID)
if err != nil {
m.logger.ErrorContext(ctx, "error getting stored rules", errors.Attr(err))
m.logger.ErrorContext(ctx, "error getting stored rules", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: err}
}
@@ -1032,7 +1032,7 @@ func (m *Manager) GetAlertDetailsForMetricNames(ctx context.Context, metricNames
var rule ruletypes.GettableRule
err = json.Unmarshal([]byte(storedRule.Data), &rule)
if err != nil {
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", storedRule.ID.StringValue(), errors.Attr(err))
m.logger.ErrorContext(ctx, "failed to unmarshal rule from db", "id", storedRule.ID.StringValue(), "error", err)
continue
}

View File

@@ -7,9 +7,6 @@ import (
"log/slog"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
@@ -22,6 +19,8 @@ import (
"github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/units"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
)
type PromRule struct {
@@ -157,7 +156,7 @@ func (r *PromRule) buildAndRunQuery(ctx context.Context, ts time.Time) (ruletype
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, matrixToProcess)
// In case of error we log the error and continue with the original series
if filterErr != nil {
r.logger.ErrorContext(ctx, "Error filtering new series, ", errors.Attr(filterErr), "rule_name", r.Name())
r.logger.ErrorContext(ctx, "Error filtering new series, ", "error", filterErr, "rule_name", r.Name())
} else {
matrixToProcess = filteredSeries
}
@@ -234,7 +233,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (int, error) {
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
r.logger.WarnContext(ctx, "Expanding alert template failed", "rule_name", r.Name(), errors.Attr(err), "data", tmplData)
r.logger.WarnContext(ctx, "Expanding alert template failed", "rule_name", r.Name(), "error", err, "data", tmplData)
}
return result
}
@@ -312,7 +311,7 @@ func (r *PromRule) Eval(ctx context.Context, ts time.Time) (int, error) {
for fp, a := range r.Active {
labelsJSON, err := json.Marshal(a.QueryResultLables)
if err != nil {
r.logger.ErrorContext(ctx, "error marshaling labels", errors.Attr(err), "rule_name", r.Name())
r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "rule_name", r.Name())
}
if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given

View File

@@ -9,14 +9,12 @@ import (
"log/slog"
opentracing "github.com/opentracing/opentracing-go"
plabels "github.com/prometheus/prometheus/model/labels"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
opentracing "github.com/opentracing/opentracing-go"
plabels "github.com/prometheus/prometheus/model/labels"
)
// PromRuleTask is a promql rule executor
@@ -333,7 +331,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
g.logger.InfoContext(ctx, "promql rule task", "name", g.name, "eval_started_at", ts)
maintenance, err := g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID.StringValue())
if err != nil {
g.logger.ErrorContext(ctx, "error in processing sql query", errors.Attr(err))
g.logger.ErrorContext(ctx, "error in processing sql query", "error", err)
}
for i, rule := range g.rules {
@@ -383,7 +381,7 @@ func (g *PromRuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(ruletypes.HealthBad)
rule.SetLastError(err)
g.logger.WarnContext(ctx, "evaluating rule failed", "rule_id", rule.ID(), errors.Attr(err))
g.logger.WarnContext(ctx, "evaluating rule failed", "rule_id", rule.ID(), "error", err)
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.

View File

@@ -9,14 +9,12 @@ import (
"log/slog"
opentracing "github.com/opentracing/opentracing-go"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
opentracing "github.com/opentracing/opentracing-go"
)
// RuleTask holds a rule (with composite queries)
@@ -319,7 +317,7 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
maintenance, err := g.maintenanceStore.GetAllPlannedMaintenance(ctx, g.orgID.StringValue())
if err != nil {
g.logger.ErrorContext(ctx, "error in processing sql query", errors.Attr(err))
g.logger.ErrorContext(ctx, "error in processing sql query", "error", err)
}
for i, rule := range g.rules {
@@ -369,7 +367,7 @@ func (g *RuleTask) Eval(ctx context.Context, ts time.Time) {
rule.SetHealth(ruletypes.HealthBad)
rule.SetLastError(err)
g.logger.WarnContext(ctx, "evaluating rule failed", "rule_id", rule.ID(), errors.Attr(err))
g.logger.WarnContext(ctx, "evaluating rule failed", "rule_id", rule.ID(), "error", err)
// Canceled queries are intentional termination of queries. This normally
// happens on shutdown and thus we skip logging of any errors here.

View File

@@ -5,14 +5,11 @@ import (
"fmt"
"time"
"log/slog"
"github.com/google/uuid"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/google/uuid"
"log/slog"
)
// TestNotification prepares a dummy rule for given rule parameters and
@@ -60,7 +57,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
)
if err != nil {
slog.Error("failed to prepare a new threshold rule for test", errors.Attr(err))
slog.Error("failed to prepare a new threshold rule for test", "error", err)
return 0, model.BadRequest(err)
}
@@ -82,7 +79,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
)
if err != nil {
slog.Error("failed to prepare a new promql rule for test", errors.Attr(err))
slog.Error("failed to prepare a new promql rule for test", "error", err)
return 0, model.BadRequest(err)
}
} else {
@@ -94,7 +91,7 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
alertsFound, err := rule.Eval(ctx, ts)
if err != nil {
slog.Error("evaluating rule failed", "rule", rule.Name(), errors.Attr(err))
slog.Error("evaluating rule failed", "rule", rule.Name(), "error", err)
return 0, model.InternalError(fmt.Errorf("rule evaluation failed"))
}
rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc)

View File

@@ -13,7 +13,6 @@ import (
"time"
"github.com/SigNoz/signoz/pkg/contextlinks"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/query-service/postprocess"
@@ -447,14 +446,14 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID,
}
if err != nil {
r.logger.ErrorContext(ctx, "failed to get alert query range result", "rule_name", r.Name(), errors.Attr(err), "query_errors", queryErrors)
r.logger.ErrorContext(ctx, "failed to get alert query range result", "rule_name", r.Name(), "error", err, "query_errors", queryErrors)
return nil, fmt.Errorf("internal error while querying")
}
if params.CompositeQuery.QueryType == v3.QueryTypeBuilder {
results, err = postprocess.PostProcessResult(results, params)
if err != nil {
r.logger.ErrorContext(ctx, "failed to post process result", "rule_name", r.Name(), errors.Attr(err))
r.logger.ErrorContext(ctx, "failed to post process result", "rule_name", r.Name(), "error", err)
return nil, fmt.Errorf("internal error while post processing")
}
}
@@ -514,7 +513,7 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
v5Result, err := r.querierV5.QueryRange(ctx, orgID, params)
if err != nil {
r.logger.ErrorContext(ctx, "failed to get alert query result", "rule_name", r.Name(), errors.Attr(err))
r.logger.ErrorContext(ctx, "failed to get alert query result", "rule_name", r.Name(), "error", err)
return nil, fmt.Errorf("internal error while querying")
}
@@ -555,7 +554,7 @@ func (r *ThresholdRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUI
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
// In case of error we log the error and continue with the original series
if filterErr != nil {
r.logger.ErrorContext(ctx, "Error filtering new series, ", errors.Attr(filterErr), "rule_name", r.Name())
r.logger.ErrorContext(ctx, "Error filtering new series, ", "error", filterErr, "rule_name", r.Name())
} else {
seriesToProcess = filteredSeries
}
@@ -640,7 +639,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (int, error) {
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
r.logger.ErrorContext(ctx, "Expanding alert template failed", errors.Attr(err), "data", tmplData)
r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData)
}
return result
}
@@ -733,7 +732,7 @@ func (r *ThresholdRule) Eval(ctx context.Context, ts time.Time) (int, error) {
for fp, a := range r.Active {
labelsJSON, err := json.Marshal(a.QueryResultLables)
if err != nil {
r.logger.ErrorContext(ctx, "error marshaling labels", errors.Attr(err), "labels", a.Labels)
r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
}
if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given

View File

@@ -934,7 +934,7 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
v.warnings = append(v.warnings, warnMsg)
}
v.keysWithWarnings[keyName] = true
v.logger.Warn("ambiguous key", slog.String("field_key_name", fieldKey.Name)) //nolint:sloglint
v.logger.Warn("ambiguous key", "field_key_name", fieldKey.Name) //nolint:sloglint
}
return fieldKeysForName

View File

@@ -3,7 +3,6 @@ package queryparser
import (
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/http/binding"
"github.com/SigNoz/signoz/pkg/http/render"
@@ -32,7 +31,7 @@ func (a *API) AnalyzeQueryFilter(w http.ResponseWriter, r *http.Request) {
result, err := a.queryParser.AnalyzeQueryFilter(r.Context(), req.QueryType, req.Query)
if err != nil {
a.settings.Logger.ErrorContext(r.Context(), "failed to analyze query filter", errors.Attr(err))
a.settings.Logger.ErrorContext(r.Context(), "failed to analyze query filter", "error", err)
render.Error(w, err)
return
}

View File

@@ -133,7 +133,7 @@ func (r *rule) GetStoredRulesByMetricName(ctx context.Context, orgID string, met
for _, storedRule := range storedRules {
var ruleData ruletypes.PostableRule
if err := json.Unmarshal([]byte(storedRule.Data), &ruleData); err != nil {
r.logger.WarnContext(ctx, "failed to unmarshal rule data", slog.String("rule_id", storedRule.ID.StringValue()), errors.Attr(err))
r.logger.WarnContext(ctx, "failed to unmarshal rule data", "rule_id", storedRule.ID.StringValue(), "error", err)
continue
}
@@ -167,7 +167,7 @@ func (r *rule) GetStoredRulesByMetricName(ctx context.Context, orgID string, met
if spec, ok := queryEnvelope.Spec.(qbtypes.PromQuery); ok {
result, err := r.queryParser.AnalyzeQueryFilter(ctx, qbtypes.QueryTypePromQL, spec.Query)
if err != nil {
r.logger.WarnContext(ctx, "failed to parse PromQL query", slog.String("query", spec.Query), errors.Attr(err))
r.logger.WarnContext(ctx, "failed to parse PromQL query", "query", spec.Query, "error", err)
continue
}
if slices.Contains(result.MetricNames, metricName) {
@@ -179,7 +179,7 @@ func (r *rule) GetStoredRulesByMetricName(ctx context.Context, orgID string, met
if spec, ok := queryEnvelope.Spec.(qbtypes.ClickHouseQuery); ok {
result, err := r.queryParser.AnalyzeQueryFilter(ctx, qbtypes.QueryTypeClickHouseSQL, spec.Query)
if err != nil {
r.logger.WarnContext(ctx, "failed to parse ClickHouse query", slog.String("query", spec.Query), errors.Attr(err))
r.logger.WarnContext(ctx, "failed to parse ClickHouse query", "query", spec.Query, "error", err)
continue
}
if slices.Contains(result.MetricNames, metricName) {

View File

@@ -2,7 +2,6 @@ package signoz
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager"
@@ -101,8 +100,8 @@ func New(
return nil, err
}
instrumentation.Logger().InfoContext(ctx, "starting signoz", slog.String("version", version.Info.Version()), slog.String("variant", version.Info.Variant()), slog.String("commit", version.Info.Hash()), slog.String("branch", version.Info.Branch()), slog.String("go", version.Info.GoVersion()), slog.String("time", version.Info.Time()))
instrumentation.Logger().DebugContext(ctx, "loaded signoz config", slog.Any("config", config))
instrumentation.Logger().InfoContext(ctx, "starting signoz", "version", version.Info.Version(), "variant", version.Info.Variant(), "commit", version.Info.Hash(), "branch", version.Info.Branch(), "go", version.Info.GoVersion(), "time", version.Info.Time())
instrumentation.Logger().DebugContext(ctx, "loaded signoz config", "config", config)
// Get the provider settings from instrumentation
providerSettings := instrumentation.ToProviderSettings()

Some files were not shown because too many files have changed in this diff Show More