Compare commits

..

16 Commits

Author SHA1 Message Date
Karan Balani
07ca268e9b fix: nil errors without making frontend changes 2026-03-21 23:32:04 +05:30
Karan Balani
00723f4348 chore: update openapi specs 2026-03-21 23:24:49 +05:30
Karan Balani
be5100ad5b fix: nil pointer for update user in integration test due to half payload being passed 2026-03-21 23:23:57 +05:30
Karan Balani
b736e7d412 chore: arrange getter and setter methods 2026-03-21 13:56:27 +05:30
Karan Balani
4c4b0a10b5 chore: move user module as user setter 2026-03-21 13:35:58 +05:30
Karan Balani
2c27183ed8 chore: return userroles with user in getter where possible 2026-03-21 13:19:25 +05:30
Karan Balani
3c84806999 chore: rename userrolestore to user_role_store 2026-03-21 13:11:56 +05:30
Karan Balani
363716defa chore: address pr comments 2026-03-21 13:11:19 +05:30
Karan Balani
7b613204c4 chore: user 0th role instead of highest 2026-03-21 12:11:29 +05:30
Karan Balani
5df65faa35 chore: remove refs of calling vars as storable users 2026-03-21 12:02:35 +05:30
Karan Balani
6f7e0402ed chore: remove storable user struct and minor other changes 2026-03-21 12:00:44 +05:30
Karan Balani
111a08ebfc fix: raw queries pointing to role column in users table 2026-03-21 02:05:14 +05:30
Karan Balani
26db7b9f9a feat: add migration to drop role column from users table 2026-03-21 01:41:07 +05:30
Karan Balani
00b386b121 fix: user types and order of update user 2026-03-21 00:19:58 +05:30
Karan Balani
ee116900c2 fix: golint and register migrations 2026-03-21 00:04:18 +05:30
Karan Balani
87d89fb1ef feat: introduce user_role table 2026-03-20 23:49:09 +05:30
148 changed files with 2115 additions and 1516 deletions

View File

@@ -35,7 +35,7 @@ linters:
- identical
sloglint:
no-mixed-args: true
attr-only: true
kv-only: true
no-global: all
context: all
static-msg: true

View File

@@ -4,15 +4,12 @@ import (
"context"
"log/slog"
"github.com/spf13/cobra"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
"github.com/SigNoz/signoz/pkg/authz/openfgaschema"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
@@ -31,6 +28,7 @@ import (
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/SigNoz/signoz/pkg/zeus/noopzeus"
"github.com/spf13/cobra"
)
func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
@@ -92,37 +90,37 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
},
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create signoz", "error", err)
return err
}
server, err := app.NewServer(config, signoz)
if err != nil {
logger.ErrorContext(ctx, "failed to create server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create server", "error", err)
return err
}
if err := server.Start(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
signoz.Start(ctx)
if err := signoz.Wait(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start signoz", "error", err)
return err
}
err = server.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}
err = signoz.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
return err
}

View File

@@ -5,8 +5,6 @@ import (
"log/slog"
"time"
"github.com/spf13/cobra"
"github.com/SigNoz/signoz/cmd"
"github.com/SigNoz/signoz/ee/authn/callbackauthn/oidccallbackauthn"
"github.com/SigNoz/signoz/ee/authn/callbackauthn/samlcallbackauthn"
@@ -25,7 +23,6 @@ import (
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/licensing"
@@ -41,6 +38,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/spf13/cobra"
)
func registerServer(parentCmd *cobra.Command, logger *slog.Logger) {
@@ -71,7 +69,7 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
// add enterprise sqlstore factories to the community sqlstore factories
sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory(), sqlstorehook.NewInstrumentationFactory())); err != nil {
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", errors.Attr(err))
logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
return err
}
@@ -134,37 +132,37 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create signoz", "error", err)
return err
}
server, err := enterpriseapp.NewServer(config, signoz)
if err != nil {
logger.ErrorContext(ctx, "failed to create server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to create server", "error", err)
return err
}
if err := server.Start(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
signoz.Start(ctx)
if err := signoz.Wait(ctx); err != nil {
logger.ErrorContext(ctx, "failed to start signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to start signoz", "error", err)
return err
}
err = server.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop server", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}
err = signoz.Stop(ctx)
if err != nil {
logger.ErrorContext(ctx, "failed to stop signoz", errors.Attr(err))
logger.ErrorContext(ctx, "failed to stop signoz", "error", err)
return err
}

View File

@@ -4,10 +4,8 @@ import (
"log/slog"
"os"
"github.com/spf13/cobra"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/version"
"github.com/spf13/cobra"
)
var RootCmd = &cobra.Command{
@@ -22,7 +20,7 @@ var RootCmd = &cobra.Command{
func Execute(logger *slog.Logger) {
err := RootCmd.Execute()
if err != nil {
logger.ErrorContext(RootCmd.Context(), "error running command", errors.Attr(err))
logger.ErrorContext(RootCmd.Context(), "error running command", "error", err)
os.Exit(1)
}
}

4
conf/cache-config.yml Normal file
View File

@@ -0,0 +1,4 @@
provider: "inmemory"
inmemory:
ttl: 60m
cleanupInterval: 10m

25
conf/prometheus.yml Normal file
View File

@@ -0,0 +1,25 @@
# my global config
global:
scrape_interval: 5s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- 127.0.0.1:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
- 'alerts.yml'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs: []
remote_read:
- url: tcp://localhost:9000/signoz_metrics

View File

@@ -2026,6 +2026,31 @@ components:
userId:
type: string
type: object
TypesDeprecatedUser:
properties:
createdAt:
format: date-time
type: string
displayName:
type: string
email:
type: string
id:
type: string
isRoot:
type: boolean
orgId:
type: string
role:
type: string
status:
type: string
updatedAt:
format: date-time
type: string
required:
- id
type: object
TypesGettableAPIKey:
properties:
createdAt:
@@ -2222,8 +2247,6 @@ components:
type: boolean
orgId:
type: string
role:
type: string
status:
type: string
updatedAt:
@@ -5077,7 +5100,7 @@ paths:
properties:
data:
items:
$ref: '#/components/schemas/TypesUser'
$ref: '#/components/schemas/TypesDeprecatedUser'
type: array
status:
type: string
@@ -5175,7 +5198,7 @@ paths:
schema:
properties:
data:
$ref: '#/components/schemas/TypesUser'
$ref: '#/components/schemas/TypesDeprecatedUser'
status:
type: string
required:
@@ -5229,7 +5252,7 @@ paths:
content:
application/json:
schema:
$ref: '#/components/schemas/TypesUser'
$ref: '#/components/schemas/TypesDeprecatedUser'
responses:
"200":
content:
@@ -5237,7 +5260,7 @@ paths:
schema:
properties:
data:
$ref: '#/components/schemas/TypesUser'
$ref: '#/components/schemas/TypesDeprecatedUser'
status:
type: string
required:
@@ -5295,7 +5318,7 @@ paths:
schema:
properties:
data:
$ref: '#/components/schemas/TypesUser'
$ref: '#/components/schemas/TypesDeprecatedUser'
status:
type: string
required:

View File

@@ -74,37 +74,37 @@ func (p *BaseSeasonalProvider) getResults(ctx context.Context, orgID valuer.UUID
instrumentationtypes.CodeFunctionName: "getResults",
})
// TODO(srikanthccv): parallelize this?
p.logger.InfoContext(ctx, "fetching results for current period", slog.Any("anomaly_current_period_query", params.CurrentPeriodQuery))
p.logger.InfoContext(ctx, "fetching results for current period", "anomaly_current_period_query", params.CurrentPeriodQuery)
currentPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past period", slog.Any("anomaly_past_period_query", params.PastPeriodQuery))
p.logger.InfoContext(ctx, "fetching results for past period", "anomaly_past_period_query", params.PastPeriodQuery)
pastPeriodResults, err := p.querier.QueryRange(ctx, orgID, &params.PastPeriodQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for current season", slog.Any("anomaly_current_season_query", params.CurrentSeasonQuery))
p.logger.InfoContext(ctx, "fetching results for current season", "anomaly_current_season_query", params.CurrentSeasonQuery)
currentSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.CurrentSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past season", slog.Any("anomaly_past_season_query", params.PastSeasonQuery))
p.logger.InfoContext(ctx, "fetching results for past season", "anomaly_past_season_query", params.PastSeasonQuery)
pastSeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.PastSeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 2 season", slog.Any("anomaly_past_2season_query", params.Past2SeasonQuery))
p.logger.InfoContext(ctx, "fetching results for past 2 season", "anomaly_past_2season_query", params.Past2SeasonQuery)
past2SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past2SeasonQuery)
if err != nil {
return nil, err
}
p.logger.InfoContext(ctx, "fetching results for past 3 season", slog.Any("anomaly_past_3season_query", params.Past3SeasonQuery))
p.logger.InfoContext(ctx, "fetching results for past 3 season", "anomaly_past_3season_query", params.Past3SeasonQuery)
past3SeasonResults, err := p.querier.QueryRange(ctx, orgID, &params.Past3SeasonQuery)
if err != nil {
return nil, err
@@ -212,17 +212,17 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
if predictedValue < 0 {
// this should not happen (except when the data has extreme outliers)
// we will use the moving avg of the previous period series in this case
p.logger.WarnContext(ctx, "predicted value is less than 0 for series", slog.Float64("anomaly_predicted_value", predictedValue), slog.Any("anomaly_labels", series.Labels))
p.logger.WarnContext(ctx, "predicted value is less than 0 for series", "anomaly_predicted_value", predictedValue, "anomaly_labels", series.Labels)
predictedValue = p.getMovingAvg(prevSeries, movingAvgWindowSize, idx)
}
p.logger.DebugContext(ctx, "predicted value for series",
slog.Float64("anomaly_moving_avg", movingAvg),
slog.Float64("anomaly_avg", avg),
slog.Float64("anomaly_mean", mean),
slog.Any("anomaly_labels", series.Labels),
slog.Float64("anomaly_predicted_value", predictedValue),
slog.Float64("anomaly_curr", curr.Value),
"anomaly_moving_avg", movingAvg,
"anomaly_avg", avg,
"anomaly_mean", mean,
"anomaly_labels", series.Labels,
"anomaly_predicted_value", predictedValue,
"anomaly_curr", curr.Value,
)
predictedSeries.Values = append(predictedSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
@@ -412,7 +412,7 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)
stdDev := p.getStdDev(currentSeasonSeries)
p.logger.InfoContext(ctx, "calculated standard deviation for series", slog.Float64("anomaly_std_dev", stdDev), slog.Any("anomaly_labels", series.Labels))
p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
@@ -420,12 +420,12 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
past2SeasonSeriesAvg := p.getAvg(past2SeasonSeries)
past3SeasonSeriesAvg := p.getAvg(past3SeasonSeries)
p.logger.InfoContext(ctx, "calculated mean for series",
slog.Float64("anomaly_prev_series_avg", prevSeriesAvg),
slog.Float64("anomaly_current_season_series_avg", currentSeasonSeriesAvg),
slog.Float64("anomaly_past_season_series_avg", pastSeasonSeriesAvg),
slog.Float64("anomaly_past_2season_series_avg", past2SeasonSeriesAvg),
slog.Float64("anomaly_past_3season_series_avg", past3SeasonSeriesAvg),
slog.Any("anomaly_labels", series.Labels),
"anomaly_prev_series_avg", prevSeriesAvg,
"anomaly_current_season_series_avg", currentSeasonSeriesAvg,
"anomaly_past_season_series_avg", pastSeasonSeriesAvg,
"anomaly_past_2season_series_avg", past2SeasonSeriesAvg,
"anomaly_past_3season_series_avg", past3SeasonSeriesAvg,
"anomaly_labels", series.Labels,
)
predictedSeries := p.getPredictedSeries(

View File

@@ -3,7 +3,6 @@ package oidccallbackauthn
import (
"context"
"fmt"
"log/slog"
"net/url"
"github.com/SigNoz/signoz/pkg/authn"
@@ -151,7 +150,7 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
// Some IDPs return a single group as a string instead of an array
groups = append(groups, g)
default:
a.settings.Logger().WarnContext(ctx, "oidc: unsupported groups type", slog.String("type", fmt.Sprintf("%T", claimValue)))
a.settings.Logger().WarnContext(ctx, "oidc: unsupported groups type", "type", fmt.Sprintf("%T", claimValue))
}
}
}

View File

@@ -3,11 +3,8 @@ package httplicensing
import (
"context"
"encoding/json"
"log/slog"
"time"
"github.com/tidwall/gjson"
"github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/errors"
@@ -19,6 +16,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/tidwall/gjson"
)
type provider struct {
@@ -57,7 +55,7 @@ func (provider *provider) Start(ctx context.Context) error {
err := provider.Validate(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
}
for {
@@ -67,7 +65,7 @@ func (provider *provider) Start(ctx context.Context) error {
case <-tick.C:
err := provider.Validate(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to validate license from upstream server", "error", err)
}
}
}
@@ -135,7 +133,7 @@ func (provider *provider) Refresh(ctx context.Context, organizationID valuer.UUI
if errors.Ast(err, errors.TypeNotFound) {
return nil
}
provider.settings.Logger().ErrorContext(ctx, "license validation failed", slog.String("org_id", organizationID.StringValue()))
provider.settings.Logger().ErrorContext(ctx, "license validation failed", "org_id", organizationID.StringValue())
return err
}

View File

@@ -5,7 +5,6 @@ import (
"context"
"encoding/json"
"io"
"log/slog"
"net/http"
"runtime/debug"
@@ -62,10 +61,10 @@ func (h *handler) QueryRange(rw http.ResponseWriter, req *http.Request) {
queryJSON, _ := json.Marshal(queryRangeRequest)
h.set.Logger.ErrorContext(ctx, "panic in QueryRange",
slog.Any("error", r),
slog.Any("user", claims.UserID),
slog.String("payload", string(queryJSON)),
slog.String("stacktrace", stackTrace),
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(

View File

@@ -10,6 +10,8 @@ import (
"strings"
"time"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/modules/user"
@@ -18,7 +20,6 @@ import (
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
"log/slog"
)
type CloudIntegrationConnectionParamsResponse struct {
@@ -126,7 +127,7 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
))
}
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
allPats, err := ah.Signoz.Modules.UserSetter.ListAPIKeys(ctx, orgIdUUID)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err,
@@ -154,7 +155,7 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
))
}
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
err = ah.Signoz.Modules.UserSetter.CreateAPIKey(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
@@ -169,14 +170,19 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId), types.UserStatusActive)
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, valuer.MustNewUUID(orgId), types.UserStatusActive)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
}
password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
cloudIntegrationUser, err = ah.Signoz.Modules.UserSetter.GetOrCreateUser(
ctx,
cloudIntegrationUser,
user.WithFactorPassword(password),
user.WithRoleNames([]string{authtypes.SigNozViewerRoleName}),
)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
}

View File

@@ -4,15 +4,10 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"time"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"log/slog"
"github.com/SigNoz/signoz/ee/query-service/constants"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/http/render"
@@ -20,6 +15,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/licensetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"log/slog"
)
func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
@@ -42,7 +38,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
slog.DebugContext(ctx, "fetching license")
license, err := ah.Signoz.Licensing.GetActive(ctx, orgID)
if err != nil {
slog.ErrorContext(ctx, "failed to fetch license", signozerrors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch license", "error", err)
} else if license == nil {
slog.DebugContext(ctx, "no active license found")
} else {
@@ -55,7 +51,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
// merge featureSet and zeusFeatures in featureSet with higher priority to zeusFeatures
featureSet = MergeFeatureSets(zeusFeatures, featureSet)
} else {
slog.ErrorContext(ctx, "failed to fetch zeus features", signozerrors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch zeus features", "error", err)
}
}
}

View File

@@ -7,7 +7,6 @@ import (
"net/http"
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
@@ -36,7 +35,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := baseapp.ParseQueryRangeParams(r)
if apiErrorObj != nil {
slog.ErrorContext(r.Context(), "error parsing metric query range params", errors.Attr(apiErrorObj.Err))
slog.ErrorContext(r.Context(), "error parsing metric query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -45,7 +44,7 @@ func (aH *APIHandler) queryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
slog.ErrorContext(r.Context(), "error while adding temporality for metrics", errors.Attr(temporalityErr))
slog.ErrorContext(r.Context(), "error while adding temporality for metrics", "error", temporalityErr)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}

View File

@@ -8,21 +8,16 @@ import (
_ "net/http/pprof" // http profiler
"slices"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"github.com/gorilla/handlers"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/ee/query-service/app/api"
"github.com/SigNoz/signoz/ee/query-service/rules"
"github.com/SigNoz/signoz/ee/query-service/usage"
@@ -36,8 +31,8 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/web"
"log/slog"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
@@ -52,6 +47,7 @@ import (
baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"log/slog"
)
// Server runs HTTP, Mux and a grpc server
@@ -308,7 +304,7 @@ func (s *Server) Start(ctx context.Context) error {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
slog.Error("Could not start HTTP server", errors.Attr(err))
slog.Error("Could not start HTTP server", "error", err)
}
s.unavailableChannel <- healthcheck.Unavailable
}()
@@ -318,7 +314,7 @@ func (s *Server) Start(ctx context.Context) error {
err = http.ListenAndServe(baseconst.DebugHttpPort, nil)
if err != nil {
slog.Error("Could not start pprof server", errors.Attr(err))
slog.Error("Could not start pprof server", "error", err)
}
}()
@@ -326,7 +322,7 @@ func (s *Server) Start(ctx context.Context) error {
slog.Info("Starting OpAmp Websocket server", "addr", baseconst.OpAmpWsEndpoint)
err := s.opampServer.Start(baseconst.OpAmpWsEndpoint)
if err != nil {
slog.Error("opamp ws server failed to start", errors.Attr(err))
slog.Error("opamp ws server failed to start", "error", err)
s.unavailableChannel <- healthcheck.Unavailable
}
}()

View File

@@ -12,7 +12,6 @@ import (
"github.com/SigNoz/signoz/ee/query-service/anomaly"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/transition"
@@ -309,7 +308,7 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
// In case of error we log the error and continue with the original series
if filterErr != nil {
r.logger.ErrorContext(ctx, "Error filtering new series, ", errors.Attr(filterErr), "rule_name", r.Name())
r.logger.ErrorContext(ctx, "Error filtering new series, ", "error", filterErr, "rule_name", r.Name())
} else {
seriesToProcess = filteredSeries
}
@@ -392,7 +391,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (int, error) {
result, err := tmpl.Expand()
if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err)
r.logger.ErrorContext(ctx, "Expanding alert template failed", errors.Attr(err), "data", tmplData, "rule_name", r.Name())
r.logger.ErrorContext(ctx, "Expanding alert template failed", "error", err, "data", tmplData, "rule_name", r.Name())
}
return result
}
@@ -468,7 +467,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (int, error) {
for fp, a := range r.Active {
labelsJSON, err := json.Marshal(a.QueryResultLables)
if err != nil {
r.logger.ErrorContext(ctx, "error marshaling labels", errors.Attr(err), "labels", a.Labels)
r.logger.ErrorContext(ctx, "error marshaling labels", "error", err, "labels", a.Labels)
}
if _, ok := resultFPs[fp]; !ok {
// If the alert was previously firing, keep it around for a given

View File

@@ -6,16 +6,14 @@ import (
"time"
"log/slog"
"github.com/google/uuid"
"github.com/SigNoz/signoz/pkg/errors"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"log/slog"
)
func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error) {
@@ -153,7 +151,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
)
if err != nil {
slog.Error("failed to prepare a new threshold rule for test", "name", alertname, errors.Attr(err))
slog.Error("failed to prepare a new threshold rule for test", "name", alertname, "error", err)
return 0, basemodel.BadRequest(err)
}
@@ -175,7 +173,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
)
if err != nil {
slog.Error("failed to prepare a new promql rule for test", "name", alertname, errors.Attr(err))
slog.Error("failed to prepare a new promql rule for test", "name", alertname, "error", err)
return 0, basemodel.BadRequest(err)
}
} else if parsedRule.RuleType == ruletypes.RuleTypeAnomaly {
@@ -195,7 +193,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
)
if err != nil {
slog.Error("failed to prepare a new anomaly rule for test", "name", alertname, errors.Attr(err))
slog.Error("failed to prepare a new anomaly rule for test", "name", alertname, "error", err)
return 0, basemodel.BadRequest(err)
}
} else {
@@ -207,7 +205,7 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
alertsFound, err := rule.Eval(ctx, ts)
if err != nil {
slog.Error("evaluating rule failed", "rule", rule.Name(), errors.Attr(err))
slog.Error("evaluating rule failed", "rule", rule.Name(), "error", err)
return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
}
rule.SendAlerts(ctx, ts, 0, time.Minute, opts.NotifyFunc)

View File

@@ -15,7 +15,6 @@ import (
"github.com/google/uuid"
"github.com/SigNoz/signoz/ee/query-service/model"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/query-service/utils/encryption"
@@ -77,14 +76,14 @@ func (lm *Manager) Start(ctx context.Context) error {
func (lm *Manager) UploadUsage(ctx context.Context) {
organizations, err := lm.orgGetter.ListByOwnedKeyRange(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to get organizations", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get organizations", "error", err)
return
}
for _, organization := range organizations {
// check if license is present or not
license, err := lm.licenseService.GetActive(ctx, organization.ID)
if err != nil {
slog.ErrorContext(ctx, "failed to get active license", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get active license", "error", err)
return
}
if license == nil {
@@ -116,7 +115,7 @@ func (lm *Manager) UploadUsage(ctx context.Context) {
dbusages := []model.UsageDB{}
err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
slog.ErrorContext(ctx, "failed to get usage from clickhouse", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get usage from clickhouse", "error", err)
return
}
for _, u := range dbusages {
@@ -136,14 +135,14 @@ func (lm *Manager) UploadUsage(ctx context.Context) {
for _, usage := range usages {
usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
if err != nil {
slog.ErrorContext(ctx, "error while decrypting usage data", errors.Attr(err))
slog.ErrorContext(ctx, "error while decrypting usage data", "error", err)
return
}
usageData := model.Usage{}
err = json.Unmarshal(usageDataBytes, &usageData)
if err != nil {
slog.ErrorContext(ctx, "error while unmarshalling usage data", errors.Attr(err))
slog.ErrorContext(ctx, "error while unmarshalling usage data", "error", err)
return
}
@@ -164,13 +163,13 @@ func (lm *Manager) UploadUsage(ctx context.Context) {
body, errv2 := json.Marshal(payload)
if errv2 != nil {
slog.ErrorContext(ctx, "error while marshalling usage payload", errors.Attr(errv2))
slog.ErrorContext(ctx, "error while marshalling usage payload", "error", errv2)
return
}
errv2 = lm.zeus.PutMeters(ctx, payload.LicenseKey.String(), body)
if errv2 != nil {
slog.ErrorContext(ctx, "failed to upload usage", errors.Attr(errv2))
slog.ErrorContext(ctx, "failed to upload usage", "error", errv2)
// not returning error here since it is captured in the failed count
return
}

View File

@@ -4,12 +4,11 @@ import (
"context"
"database/sql"
"github.com/uptrace/bun"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
)
type provider struct {
@@ -114,7 +113,7 @@ WHERE
defer func() {
if err := constraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
@@ -175,7 +174,7 @@ WHERE
defer func() {
if err := foreignKeyConstraintsRows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()
@@ -244,7 +243,7 @@ ORDER BY index_name, column_position`, string(name))
defer func() {
if err := rows.Close(); err != nil {
provider.settings.Logger().ErrorContext(ctx, "error closing rows", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
}
}()

View File

@@ -2384,6 +2384,47 @@ export interface TypesChangePasswordRequestDTO {
userId?: string;
}
export interface TypesDeprecatedUserDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
displayName?: string;
/**
* @type string
*/
email?: string;
/**
* @type string
*/
id: string;
/**
* @type boolean
*/
isRoot?: boolean;
/**
* @type string
*/
orgId?: string;
/**
* @type string
*/
role?: string;
/**
* @type string
*/
status?: string;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
}
export interface TypesGettableAPIKeyDTO {
/**
* @type string
@@ -2682,10 +2723,6 @@ export interface TypesUserDTO {
* @type string
*/
orgId?: string;
/**
* @type string
*/
role?: string;
/**
* @type string
*/
@@ -3266,7 +3303,7 @@ export type ListUsers200 = {
/**
* @type array
*/
data: TypesUserDTO[];
data: TypesDeprecatedUserDTO[];
/**
* @type string
*/
@@ -3280,7 +3317,7 @@ export type GetUserPathParameters = {
id: string;
};
export type GetUser200 = {
data: TypesUserDTO;
data: TypesDeprecatedUserDTO;
/**
* @type string
*/
@@ -3291,7 +3328,7 @@ export type UpdateUserPathParameters = {
id: string;
};
export type UpdateUser200 = {
data: TypesUserDTO;
data: TypesDeprecatedUserDTO;
/**
* @type string
*/
@@ -3299,7 +3336,7 @@ export type UpdateUser200 = {
};
export type GetMyUser200 = {
data: TypesUserDTO;
data: TypesDeprecatedUserDTO;
/**
* @type string
*/

View File

@@ -34,13 +34,13 @@ import type {
RenderErrorResponseDTO,
RevokeAPIKeyPathParameters,
TypesChangePasswordRequestDTO,
TypesDeprecatedUserDTO,
TypesPostableAPIKeyDTO,
TypesPostableBulkInviteRequestDTO,
TypesPostableForgotPasswordDTO,
TypesPostableInviteDTO,
TypesPostableResetPasswordDTO,
TypesStorableAPIKeyDTO,
TypesUserDTO,
UpdateAPIKeyPathParameters,
UpdateUser200,
UpdateUserPathParameters,
@@ -1093,13 +1093,13 @@ export const invalidateGetUser = async (
*/
export const updateUser = (
{ id }: UpdateUserPathParameters,
typesUserDTO: BodyType<TypesUserDTO>,
typesDeprecatedUserDTO: BodyType<TypesDeprecatedUserDTO>,
) => {
return GeneratedAPIInstance<UpdateUser200>({
url: `/api/v1/user/${id}`,
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
data: typesUserDTO,
data: typesDeprecatedUserDTO,
});
};
@@ -1110,13 +1110,19 @@ export const getUpdateUserMutationOptions = <
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof updateUser>>,
TError,
{ pathParams: UpdateUserPathParameters; data: BodyType<TypesUserDTO> },
{
pathParams: UpdateUserPathParameters;
data: BodyType<TypesDeprecatedUserDTO>;
},
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof updateUser>>,
TError,
{ pathParams: UpdateUserPathParameters; data: BodyType<TypesUserDTO> },
{
pathParams: UpdateUserPathParameters;
data: BodyType<TypesDeprecatedUserDTO>;
},
TContext
> => {
const mutationKey = ['updateUser'];
@@ -1130,7 +1136,10 @@ export const getUpdateUserMutationOptions = <
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof updateUser>>,
{ pathParams: UpdateUserPathParameters; data: BodyType<TypesUserDTO> }
{
pathParams: UpdateUserPathParameters;
data: BodyType<TypesDeprecatedUserDTO>;
}
> = (props) => {
const { pathParams, data } = props ?? {};
@@ -1143,7 +1152,7 @@ export const getUpdateUserMutationOptions = <
export type UpdateUserMutationResult = NonNullable<
Awaited<ReturnType<typeof updateUser>>
>;
export type UpdateUserMutationBody = BodyType<TypesUserDTO>;
export type UpdateUserMutationBody = BodyType<TypesDeprecatedUserDTO>;
export type UpdateUserMutationError = ErrorType<RenderErrorResponseDTO>;
/**
@@ -1156,13 +1165,19 @@ export const useUpdateUser = <
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof updateUser>>,
TError,
{ pathParams: UpdateUserPathParameters; data: BodyType<TypesUserDTO> },
{
pathParams: UpdateUserPathParameters;
data: BodyType<TypesDeprecatedUserDTO>;
},
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof updateUser>>,
TError,
{ pathParams: UpdateUserPathParameters; data: BodyType<TypesUserDTO> },
{
pathParams: UpdateUserPathParameters;
data: BodyType<TypesDeprecatedUserDTO>;
},
TContext
> => {
const mutationOptions = getUpdateUserMutationOptions(options);

View File

@@ -700,17 +700,11 @@
.ant-btn {
box-shadow: none;
position: relative;
}
.tab {
border: 1px solid var(--bg-slate-400);
width: 114px;
z-index: 1;
}
.tab:hover {
z-index: 3;
}
.tab::before {
@@ -721,7 +715,6 @@
background: var(--bg-slate-400);
color: var(--text-vanilla-100);
border: 1px solid var(--bg-slate-400);
z-index: 2;
}
.selected_view::before {

View File

@@ -144,10 +144,6 @@
background: var(--bg-ink-300);
}
.expanded-clickable-row {
cursor: pointer;
}
.error-rate {
width: 120px;
}

View File

@@ -87,14 +87,14 @@ func (batcher *Batcher) Add(ctx context.Context, alerts ...*alertmanagertypes.Po
// batch could be.
if d := len(alerts) - batcher.config.Capacity; d > 0 {
alerts = alerts[d:]
batcher.logger.WarnContext(ctx, "alert batch larger than queue capacity, dropping alerts", slog.Int("num_dropped", d), slog.Int("capacity", batcher.config.Capacity))
batcher.logger.WarnContext(ctx, "alert batch larger than queue capacity, dropping alerts", "num_dropped", d, "capacity", batcher.config.Capacity)
}
// If the queue is full, remove the oldest alerts in favor
// of newer ones.
if d := (len(batcher.queue) + len(alerts)) - batcher.config.Capacity; d > 0 {
batcher.queue = batcher.queue[d:]
batcher.logger.WarnContext(ctx, "alert batch queue full, dropping alerts", slog.Int("num_dropped", d))
batcher.logger.WarnContext(ctx, "alert batch queue full, dropping alerts", "num_dropped", d)
}
batcher.queue = append(batcher.queue, alerts...)

View File

@@ -112,7 +112,7 @@ func (n *Notifier) Notify(ctx context.Context, as ...*types.Alert) (bool, error)
return false, err
}
n.logger.DebugContext(ctx, "extracted group key", slog.String("key", string(key)))
n.logger.DebugContext(ctx, "extracted group key", "key", key)
data := notify.GetTemplateData(ctx, n.tmpl, as, n.logger)
tmpl := notify.TmplText(n.tmpl, data, &err)

View File

@@ -21,7 +21,7 @@ func NewReceiverIntegrations(nc alertmanagertypes.Receiver, tmpl *template.Templ
errs types.MultiError
integrations []notify.Integration
add = func(name string, i int, rs notify.ResolvedSender, f func(l *slog.Logger) (notify.Notifier, error)) {
n, err := f(logger.With(slog.String("integration", name)))
n, err := f(logger.With("integration", name))
if err != nil {
errs.Add(err)
return

View File

@@ -74,7 +74,7 @@ func NewDispatcher(
route: r,
marker: mk,
timeout: to,
logger: l.With(slog.String("component", "signoz-dispatcher")),
logger: l.With("component", "signoz-dispatcher"),
metrics: m,
limits: lim,
notificationManager: n,
@@ -111,24 +111,24 @@ func (d *Dispatcher) run(it provider.AlertIterator) {
if !ok || alertWrapper == nil {
// Iterator exhausted for some reason.
if err := it.Err(); err != nil {
d.logger.ErrorContext(d.ctx, "Error on alert update", errors.Attr(err))
d.logger.ErrorContext(d.ctx, "Error on alert update", "err", err)
}
return
}
alert := alertWrapper.Data
d.logger.DebugContext(d.ctx, "SigNoz Custom Dispatcher: Received alert", slog.Any("alert", alert))
d.logger.DebugContext(d.ctx, "SigNoz Custom Dispatcher: Received alert", "alert", alert)
// Log errors but keep trying.
if err := it.Err(); err != nil {
d.logger.ErrorContext(d.ctx, "Error on alert update", errors.Attr(err))
d.logger.ErrorContext(d.ctx, "Error on alert update", "err", err)
continue
}
now := time.Now()
channels, err := d.notificationManager.Match(d.ctx, d.orgID, getRuleIDFromAlert(alert), alert.Labels)
if err != nil {
d.logger.ErrorContext(d.ctx, "Error on alert match", errors.Attr(err))
d.logger.ErrorContext(d.ctx, "Error on alert match", "err", err)
continue
}
for _, channel := range channels {
@@ -278,7 +278,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *dispatch.Route) {
ruleId := getRuleIDFromAlert(alert)
config, err := d.notificationManager.GetNotificationConfig(d.orgID, ruleId)
if err != nil {
d.logger.ErrorContext(d.ctx, "error getting alert notification config", slog.String("rule_id", ruleId), errors.Attr(err))
d.logger.ErrorContext(d.ctx, "error getting alert notification config", "rule_id", ruleId, "error", err)
return
}
renotifyInterval := config.Renotify.RenotifyInterval
@@ -310,7 +310,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *dispatch.Route) {
// If the group does not exist, create it. But check the limit first.
if limit := d.limits.MaxNumberOfAggregationGroups(); limit > 0 && d.aggrGroupsNum >= limit {
d.metrics.aggrGroupLimitReached.Inc()
d.logger.ErrorContext(d.ctx, "Too many aggregation groups, cannot create new group for alert", slog.Int("groups", d.aggrGroupsNum), slog.Int("limit", limit), slog.String("alert", alert.Name()))
d.logger.ErrorContext(d.ctx, "Too many aggregation groups, cannot create new group for alert", "groups", d.aggrGroupsNum, "limit", limit, "alert", alert.Name())
return
}
@@ -328,7 +328,7 @@ func (d *Dispatcher) processAlert(alert *types.Alert, route *dispatch.Route) {
go ag.run(func(ctx context.Context, alerts ...*types.Alert) bool {
_, _, err := d.stage.Exec(ctx, d.logger, alerts...)
if err != nil {
logger := d.logger.With(slog.Int("num_alerts", len(alerts)), errors.Attr(err))
logger := d.logger.With("num_alerts", len(alerts), "err", err)
if errors.Is(ctx.Err(), context.Canceled) {
// It is expected for the context to be canceled on
// configuration reload or shutdown. In this case, the
@@ -382,7 +382,7 @@ func newAggrGroup(ctx context.Context, labels model.LabelSet, r *dispatch.Route,
}
ag.ctx, ag.cancel = context.WithCancel(ctx)
ag.logger = logger.With(slog.Any("aggr_group", ag))
ag.logger = logger.With("aggr_group", ag)
// Set an initial one-time wait before flushing
// the first batch of notifications.
@@ -457,7 +457,7 @@ func (ag *aggrGroup) stop() {
// insert inserts the alert into the aggregation group.
func (ag *aggrGroup) insert(alert *types.Alert) {
if err := ag.alerts.Set(alert); err != nil {
ag.logger.ErrorContext(ag.ctx, "error on set alert", errors.Attr(err))
ag.logger.ErrorContext(ag.ctx, "error on set alert", "err", err)
}
// Immediately trigger a flush if the wait duration for this
@@ -497,7 +497,7 @@ func (ag *aggrGroup) flush(notify func(...*types.Alert) bool) {
}
sort.Stable(alertsSlice)
ag.logger.DebugContext(ag.ctx, "flushing", slog.String("alerts", fmt.Sprintf("%v", alertsSlice)))
ag.logger.DebugContext(ag.ctx, "flushing", "alerts", fmt.Sprintf("%v", alertsSlice))
if notify(alertsSlice...) {
// Delete all resolved alerts as we just sent a notification for them,
@@ -505,7 +505,7 @@ func (ag *aggrGroup) flush(notify func(...*types.Alert) bool) {
// that each resolved alert has not fired again during the flush as then
// we would delete an active alert thinking it was resolved.
if err := ag.alerts.DeleteIfNotModified(resolvedSlice); err != nil {
ag.logger.ErrorContext(ag.ctx, "error on delete alerts", errors.Attr(err))
ag.logger.ErrorContext(ag.ctx, "error on delete alerts", "err", err)
}
}
}

View File

@@ -10,6 +10,10 @@ import (
"github.com/prometheus/alertmanager/types"
"golang.org/x/sync/errgroup"
"github.com/SigNoz/signoz/pkg/alertmanager/alertmanagernotify"
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
"github.com/prometheus/alertmanager/dispatch"
"github.com/prometheus/alertmanager/featurecontrol"
"github.com/prometheus/alertmanager/inhibit"
@@ -21,11 +25,6 @@ import (
"github.com/prometheus/alertmanager/timeinterval"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/SigNoz/signoz/pkg/alertmanager/alertmanagernotify"
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
)
var (
@@ -73,7 +72,7 @@ type Server struct {
func New(ctx context.Context, logger *slog.Logger, registry prometheus.Registerer, srvConfig Config, orgID string, stateStore alertmanagertypes.StateStore, nfManager nfmanager.NotificationManager) (*Server, error) {
server := &Server{
logger: logger.With(slog.String("pkg", "go.signoz.io/pkg/alertmanager/alertmanagerserver")),
logger: logger.With("pkg", "go.signoz.io/pkg/alertmanager/alertmanagerserver"),
registry: registry,
srvConfig: srvConfig,
orgID: orgID,
@@ -140,7 +139,7 @@ func New(ctx context.Context, logger *slog.Logger, registry prometheus.Registere
server.silences.Maintenance(server.srvConfig.Silences.MaintenanceInterval, snapfnoop, server.stopc, func() (int64, error) {
// Delete silences older than the retention period.
if _, err := server.silences.GC(); err != nil {
server.logger.ErrorContext(ctx, "silence garbage collection", errors.Attr(err))
server.logger.ErrorContext(ctx, "silence garbage collection", "error", err)
// Don't return here - we need to snapshot our state first.
}
@@ -169,7 +168,7 @@ func New(ctx context.Context, logger *slog.Logger, registry prometheus.Registere
defer server.wg.Done()
server.nflog.Maintenance(server.srvConfig.NFLog.MaintenanceInterval, snapfnoop, server.stopc, func() (int64, error) {
if _, err := server.nflog.GC(); err != nil {
server.logger.ErrorContext(ctx, "notification log garbage collection", errors.Attr(err))
server.logger.ErrorContext(ctx, "notification log garbage collection", "error", err)
// Don't return without saving the current state.
}
@@ -247,7 +246,7 @@ func (server *Server) SetConfig(ctx context.Context, alertmanagerConfig *alertma
for _, rcv := range config.Receivers {
if _, found := activeReceivers[rcv.Name]; !found {
// No need to build a receiver if no route is using it.
server.logger.InfoContext(ctx, "skipping creation of receiver not referenced by any route", slog.String("receiver", rcv.Name))
server.logger.InfoContext(ctx, "skipping creation of receiver not referenced by any route", "receiver", rcv.Name)
continue
}
integrations, err := alertmanagernotify.NewReceiverIntegrations(rcv, server.tmpl, server.logger)

View File

@@ -2,7 +2,6 @@ package rulebasednotification
import (
"context"
"log/slog"
"strings"
"sync"
@@ -267,7 +266,7 @@ func (r *provider) convertLabelSetToEnv(ctx context.Context, labelSet model.Labe
}
if logForReview {
r.settings.Logger().InfoContext(ctx, "found label set with conflicting prefix dotted keys", slog.Any("labels", labelSet))
r.settings.Logger().InfoContext(ctx, "found label set with conflicting prefix dotted keys", "labels", labelSet)
}
return env

View File

@@ -2,7 +2,6 @@ package alertmanager
import (
"context"
"log/slog"
"sync"
"github.com/prometheus/alertmanager/featurecontrol"
@@ -75,7 +74,7 @@ func (service *Service) SyncServers(ctx context.Context) error {
for _, org := range orgs {
config, _, err := service.getConfig(ctx, org.ID.StringValue())
if err != nil {
service.settings.Logger().ErrorContext(ctx, "failed to get alertmanager config for org", slog.String("org_id", org.ID.StringValue()), errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to get alertmanager config for org", "org_id", org.ID.StringValue(), "error", err)
continue
}
@@ -83,7 +82,7 @@ func (service *Service) SyncServers(ctx context.Context) error {
if _, ok := service.servers[org.ID.StringValue()]; !ok {
server, err := service.newServer(ctx, org.ID.StringValue())
if err != nil {
service.settings.Logger().ErrorContext(ctx, "failed to create alertmanager server", slog.String("org_id", org.ID.StringValue()), errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to create alertmanager server", "org_id", org.ID.StringValue(), "error", err)
continue
}
@@ -91,13 +90,13 @@ func (service *Service) SyncServers(ctx context.Context) error {
}
if service.servers[org.ID.StringValue()].Hash() == config.StoreableConfig().Hash {
service.settings.Logger().DebugContext(ctx, "skipping alertmanager sync for org", slog.String("org_id", org.ID.StringValue()), slog.String("hash", config.StoreableConfig().Hash))
service.settings.Logger().DebugContext(ctx, "skipping alertmanager sync for org", "org_id", org.ID.StringValue(), "hash", config.StoreableConfig().Hash)
continue
}
err = service.servers[org.ID.StringValue()].SetConfig(ctx, config)
if err != nil {
service.settings.Logger().ErrorContext(ctx, "failed to set config for alertmanager server", slog.String("org_id", org.ID.StringValue()), errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to set config for alertmanager server", "org_id", org.ID.StringValue(), "error", err)
continue
}
}
@@ -164,7 +163,7 @@ func (service *Service) Stop(ctx context.Context) error {
for _, server := range service.servers {
if err := server.Stop(ctx); err != nil {
errs = append(errs, err)
service.settings.Logger().ErrorContext(ctx, "failed to stop alertmanager server", errors.Attr(err))
service.settings.Logger().ErrorContext(ctx, "failed to stop alertmanager server", "error", err)
}
}
@@ -192,7 +191,7 @@ func (service *Service) newServer(ctx context.Context, orgID string) (*alertmana
// defaults from an upstream upgrade or something similar) trigger a DB update
// so that other code paths reading directly from the store see the up-to-date config.
if storedHash == config.StoreableConfig().Hash {
service.settings.Logger().DebugContext(ctx, "skipping config store update for org", slog.String("org_id", orgID), slog.String("hash", config.StoreableConfig().Hash))
service.settings.Logger().DebugContext(ctx, "skipping config store update for org", "org_id", orgID, "hash", config.StoreableConfig().Hash)
return server, nil
}

View File

@@ -4,9 +4,8 @@ import (
"context"
"time"
"github.com/prometheus/common/model"
"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
"github.com/prometheus/common/model"
amConfig "github.com/prometheus/alertmanager/config"
@@ -67,7 +66,7 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
func (provider *provider) Start(ctx context.Context) error {
if err := provider.service.SyncServers(ctx); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", "error", err)
return err
}
@@ -79,7 +78,7 @@ func (provider *provider) Start(ctx context.Context) error {
return nil
case <-ticker.C:
if err := provider.service.SyncServers(ctx); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to sync alertmanager servers", "error", err)
}
}
}

View File

@@ -2,10 +2,8 @@ package segmentanalytics
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/analyticstypes"
segment "github.com/segmentio/analytics-go/v3"
@@ -47,14 +45,14 @@ func (provider *provider) Send(ctx context.Context, messages ...analyticstypes.M
for _, message := range messages {
err := provider.client.Enqueue(message)
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
}
func (provider *provider) TrackGroup(ctx context.Context, group, event string, properties map[string]any) {
if properties == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", slog.String("group", group), slog.String("event", event))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", "group", group, "event", event)
return
}
@@ -69,13 +67,13 @@ func (provider *provider) TrackGroup(ctx context.Context, group, event string, p
},
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) TrackUser(ctx context.Context, group, user, event string, properties map[string]any) {
if properties == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", slog.String("user", user), slog.String("group", group), slog.String("event", event))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping event", "user", user, "group", group, "event", event)
return
}
@@ -90,13 +88,13 @@ func (provider *provider) TrackUser(ctx context.Context, group, user, event stri
},
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) IdentifyGroup(ctx context.Context, group string, traits map[string]any) {
if traits == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", slog.String("group", group))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", "group", group)
return
}
@@ -106,7 +104,7 @@ func (provider *provider) IdentifyGroup(ctx context.Context, group string, trait
Traits: analyticstypes.NewTraitsFromMap(traits),
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
// identify the group using the stats user
@@ -116,13 +114,13 @@ func (provider *provider) IdentifyGroup(ctx context.Context, group string, trait
Traits: analyticstypes.NewTraitsFromMap(traits),
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) IdentifyUser(ctx context.Context, group, user string, traits map[string]any) {
if traits == nil {
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", slog.String("user", user), slog.String("group", group))
provider.settings.Logger().WarnContext(ctx, "empty attributes received, skipping identify", "user", user, "group", group)
return
}
@@ -132,7 +130,7 @@ func (provider *provider) IdentifyUser(ctx context.Context, group, user string,
Traits: analyticstypes.NewTraitsFromMap(traits),
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
// associate the user with the group
@@ -142,13 +140,13 @@ func (provider *provider) IdentifyUser(ctx context.Context, group, user string,
Traits: analyticstypes.NewTraits().Set("id", group), // A trait is required
})
if err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to send message to segment", "err", err)
}
}
func (provider *provider) Stop(ctx context.Context) error {
if err := provider.client.Close(); err != nil {
provider.settings.Logger().WarnContext(ctx, "unable to close segment client", errors.Attr(err))
provider.settings.Logger().WarnContext(ctx, "unable to close segment client", "err", err)
}
close(provider.stopC)

View File

@@ -118,7 +118,7 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
Description: "This endpoint lists all users",
Request: nil,
RequestContentType: "",
Response: make([]*types.GettableUser, 0),
Response: make([]*types.DeprecatedUser, 0),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
@@ -135,7 +135,7 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
Description: "This endpoint returns the user I belong to",
Request: nil,
RequestContentType: "",
Response: new(types.GettableUser),
Response: new(types.DeprecatedUser),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
@@ -152,7 +152,7 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
Description: "This endpoint returns the user by id",
Request: nil,
RequestContentType: "",
Response: new(types.GettableUser),
Response: new(types.DeprecatedUser),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusNotFound},
@@ -167,9 +167,9 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
Tags: []string{"users"},
Summary: "Update user",
Description: "This endpoint updates the user by id",
Request: new(types.User),
Request: new(types.DeprecatedUser),
RequestContentType: "application/json",
Response: new(types.GettableUser),
Response: new(types.DeprecatedUser),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},

View File

@@ -17,7 +17,7 @@ func NewStore(sqlstore sqlstore.SQLStore) authtypes.AuthNStore {
return &store{sqlstore: sqlstore}
}
func (store *store) GetActiveUserAndFactorPasswordByEmailAndOrgID(ctx context.Context, email string, orgID valuer.UUID) (*types.User, *types.FactorPassword, error) {
func (store *store) GetActiveUserAndFactorPasswordByEmailAndOrgID(ctx context.Context, email string, orgID valuer.UUID) (*types.User, *types.FactorPassword, []*authtypes.UserRole, error) {
user := new(types.User)
factorPassword := new(types.FactorPassword)
@@ -31,7 +31,7 @@ func (store *store) GetActiveUserAndFactorPasswordByEmailAndOrgID(ctx context.Co
Where("status = ?", types.UserStatusActive.StringValue()).
Scan(ctx)
if err != nil {
return nil, nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "user with email %s in org %s not found", email, orgID)
return nil, nil, nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "user with email %s in org %s not found", email, orgID)
}
err = store.
@@ -42,10 +42,22 @@ func (store *store) GetActiveUserAndFactorPasswordByEmailAndOrgID(ctx context.Co
Where("user_id = ?", user.ID).
Scan(ctx)
if err != nil {
return nil, nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodePasswordNotFound, "user with email %s in org %s does not have password", email, orgID)
return nil, nil, nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodePasswordNotFound, "user with email %s in org %s does not have password", email, orgID)
}
return user, factorPassword, nil
userRoles := make([]*authtypes.UserRole, 0)
err = store.sqlstore.
BunDBCtx(ctx).
NewSelect().
Model(&userRoles).
Where("user_id = ?", user.ID).
Relation("Role").
Scan(ctx)
if err != nil {
return nil, nil, nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodePasswordNotFound, "user with email %s in org %s does not have user role entries", email, orgID)
}
return user, factorPassword, userRoles, nil
}
func (store *store) GetAuthDomainFromID(ctx context.Context, domainID valuer.UUID) (*authtypes.AuthDomain, error) {

View File

@@ -2,21 +2,19 @@ package googlecallbackauthn
import (
"context"
"log/slog"
"net/url"
"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
admin "google.golang.org/api/admin/directory/v1"
"google.golang.org/api/option"
"github.com/SigNoz/signoz/pkg/authn"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/http/client"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
admin "google.golang.org/api/admin/directory/v1"
"google.golang.org/api/option"
)
const (
@@ -74,13 +72,13 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
}
if err := query.Get("error"); err != "" {
a.settings.Logger().ErrorContext(ctx, "google: error while authenticating", slog.String("error", err), slog.String("error_description", query.Get("error_description")))
a.settings.Logger().ErrorContext(ctx, "google: error while authenticating", "error", err, "error_description", query.Get("error_description"))
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "google: error while authenticating").WithAdditional(query.Get("error_description"))
}
state, err := authtypes.NewStateFromString(query.Get("state"))
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: invalid state", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: invalid state", "error", err)
return nil, errors.Newf(errors.TypeInvalidInput, authtypes.ErrCodeInvalidState, "google: invalid state").WithAdditional(err.Error())
}
@@ -94,11 +92,11 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
if err != nil {
var retrieveError *oauth2.RetrieveError
if errors.As(err, &retrieveError) {
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", errors.Attr(err), slog.String("error_description", retrieveError.ErrorDescription), slog.String("body", string(retrieveError.Body)))
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", "error", err, "error_description", retrieveError.ErrorDescription, "body", string(retrieveError.Body))
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: failed to get token").WithAdditional(retrieveError.ErrorDescription)
}
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: failed to get token", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "google: failed to get token")
}
@@ -110,7 +108,7 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
verifier := oidcProvider.Verifier(&oidc.Config{ClientID: authDomain.AuthDomainConfig().Google.ClientID})
idToken, err := verifier.Verify(ctx, rawIDToken)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: failed to verify token", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: failed to verify token", "error", err)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: failed to verify token")
}
@@ -122,18 +120,18 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
}
if err := idToken.Claims(&claims); err != nil {
a.settings.Logger().ErrorContext(ctx, "google: missing or invalid claims", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: missing or invalid claims", "error", err)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: missing or invalid claims").WithAdditional(err.Error())
}
if claims.HostedDomain != authDomain.StorableAuthDomain().Name {
a.settings.Logger().ErrorContext(ctx, "google: unexpected hd claim", slog.String("expected", authDomain.StorableAuthDomain().Name), slog.String("actual", claims.HostedDomain))
a.settings.Logger().ErrorContext(ctx, "google: unexpected hd claim", "expected", authDomain.StorableAuthDomain().Name, "actual", claims.HostedDomain)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: unexpected hd claim")
}
if !authDomain.AuthDomainConfig().Google.InsecureSkipEmailVerified {
if !claims.EmailVerified {
a.settings.Logger().ErrorContext(ctx, "google: email is not verified", slog.String("email", claims.Email))
a.settings.Logger().ErrorContext(ctx, "google: email is not verified", "email", claims.Email)
return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "google: email is not verified")
}
}
@@ -147,7 +145,7 @@ func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtype
if authDomain.AuthDomainConfig().Google.FetchGroups {
groups, err = a.fetchGoogleWorkspaceGroups(ctx, claims.Email, authDomain.AuthDomainConfig().Google)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: could not fetch groups", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: could not fetch groups", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "google: could not fetch groups").WithAdditional(err.Error())
}
@@ -191,7 +189,7 @@ func (a *AuthN) fetchGoogleWorkspaceGroups(ctx context.Context, userEmail string
jwtConfig, err := google.JWTConfigFromJSON([]byte(config.ServiceAccountJSON), admin.AdminDirectoryGroupReadonlyScope)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: invalid service account credentials", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: invalid service account credentials", "error", err)
return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid service account credentials")
}
@@ -201,7 +199,7 @@ func (a *AuthN) fetchGoogleWorkspaceGroups(ctx context.Context, userEmail string
adminService, err := admin.NewService(ctx, option.WithHTTPClient(jwtConfig.Client(customCtx)))
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: unable to create directory service", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: unable to create directory service", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "unable to create directory service")
}
@@ -223,7 +221,7 @@ func (a *AuthN) getGroups(ctx context.Context, adminService *admin.Service, user
groupList, err := call.Context(ctx).Do()
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: unable to list groups", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: unable to list groups", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "unable to list groups")
}
@@ -238,7 +236,7 @@ func (a *AuthN) getGroups(ctx context.Context, adminService *admin.Service, user
if fetchTransitive {
transitiveGroups, err := a.getGroups(ctx, adminService, group.Email, fetchTransitive, checkedGroups)
if err != nil {
a.settings.Logger().ErrorContext(ctx, "google: unable to list transitive groups", errors.Attr(err))
a.settings.Logger().ErrorContext(ctx, "google: unable to list transitive groups", "error", err)
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "unable to list transitive groups")
}
userGroups = append(userGroups, transitiveGroups...)

View File

@@ -21,7 +21,7 @@ func New(store authtypes.AuthNStore) *AuthN {
}
func (a *AuthN) Authenticate(ctx context.Context, email string, password string, orgID valuer.UUID) (*authtypes.Identity, error) {
user, factorPassword, err := a.store.GetActiveUserAndFactorPasswordByEmailAndOrgID(ctx, email, orgID)
user, factorPassword, userRoles, err := a.store.GetActiveUserAndFactorPasswordByEmailAndOrgID(ctx, email, orgID)
if err != nil {
return nil, err
}
@@ -30,5 +30,7 @@ func (a *AuthN) Authenticate(ctx context.Context, email string, password string,
return nil, errors.New(errors.TypeUnauthenticated, types.ErrCodeIncorrectPassword, "invalid email or password")
}
return authtypes.NewIdentity(user.ID, orgID, user.Email, user.Role, authtypes.IdentNProviderTokenizer), nil
role := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
return authtypes.NewIdentity(user.ID, orgID, user.Email, role, authtypes.IdentNProviderTokenizer), nil
}

View File

@@ -2,19 +2,18 @@ package rediscache
import (
"context"
"fmt"
"log/slog"
"strings"
"time"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
"fmt"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/cachetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
)
type provider struct {
@@ -77,6 +76,6 @@ func (c *provider) DeleteMany(ctx context.Context, orgID valuer.UUID, cacheKeys
}
if err := c.client.Del(ctx, updatedCacheKeys...).Err(); err != nil {
c.settings.Logger().ErrorContext(ctx, "error deleting cache keys", slog.Any("cache_keys", cacheKeys), errors.Attr(err))
c.settings.Logger().ErrorContext(ctx, "error deleting cache keys", "cache_keys", cacheKeys, "error", err)
}
}

View File

@@ -2,7 +2,6 @@ package noopemailing
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/emailing"
"github.com/SigNoz/signoz/pkg/factory"
@@ -25,6 +24,6 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
}
func (provider *provider) SendHTML(ctx context.Context, to string, subject string, templateName emailtypes.TemplateName, data map[string]any) error {
provider.settings.Logger().WarnContext(ctx, "using noop provider, no email will be sent", slog.String("to", to), slog.String("subject", subject))
provider.settings.Logger().WarnContext(ctx, "using noop provider, no email will be sent", "to", to, "subject", subject)
return nil
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/SigNoz/signoz/pkg/emailing"
"github.com/SigNoz/signoz/pkg/emailing/templatestore/filetemplatestore"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/smtp/client"
"github.com/SigNoz/signoz/pkg/types/emailtypes"
@@ -29,7 +28,7 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config
// Try to create a template store. If it fails, use an empty store.
store, err := filetemplatestore.NewStore(ctx, config.Templates.Directory, emailtypes.Templates, settings.Logger())
if err != nil {
settings.Logger().ErrorContext(ctx, "failed to create template store, using empty store", errors.Attr(err))
settings.Logger().ErrorContext(ctx, "failed to create template store, using empty store", "error", err)
store = filetemplatestore.NewEmptyStore()
}
@@ -88,7 +87,7 @@ func (provider *provider) SendHTML(ctx context.Context, to string, subject strin
content, err := emailtypes.NewContent(template, data)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to create email content", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to create email content", "error", err)
return err
}

View File

@@ -45,7 +45,7 @@ func NewStore(ctx context.Context, baseDir string, templates []emailtypes.Templa
t, err := parseTemplateFile(filepath.Join(baseDir, fi.Name()), templateName)
if err != nil {
logger.ErrorContext(ctx, "failed to parse template file", slog.Any("template", templateName), slog.String("path", filepath.Join(baseDir, fi.Name())), errors.Attr(err))
logger.ErrorContext(ctx, "failed to parse template file", "template", templateName, "path", filepath.Join(baseDir, fi.Name()), "error", err)
continue
}
@@ -54,7 +54,7 @@ func NewStore(ctx context.Context, baseDir string, templates []emailtypes.Templa
}
if err := checkMissingTemplates(templates, foundTemplates); err != nil {
logger.ErrorContext(ctx, "some templates are missing", errors.Attr(err))
logger.ErrorContext(ctx, "some templates are missing", "error", err)
}
return &store{fs: fs}, nil

View File

@@ -7,7 +7,7 @@ import (
)
// base is the fundamental struct that implements the error interface.
// The order of the struct is 'TCMEUAS'.
// The order of the struct is 'TCMEUA'.
type base struct {
// t denotes the custom type of the error.
t typ
@@ -21,13 +21,16 @@ type base struct {
u string
// a denotes any additional error messages (if present).
a []string
// s contains the stacktrace captured at error creation time.
s stacktrace
}
// Stacktrace returns the stacktrace captured at error creation time, formatted as a string.
func (b *base) Stacktrace() string {
return b.s.String()
func (b *base) LogValue() slog.Value {
return slog.GroupValue(
slog.String("type", b.t.s),
slog.String("code", b.c.s),
slog.String("message", b.m),
slog.String("url", b.u),
slog.Any("additional", b.a),
)
}
// base implements Error interface.
@@ -48,7 +51,6 @@ func New(t typ, code Code, message string) *base {
e: nil,
u: "",
a: []string{},
s: newStackTrace(),
}
}
@@ -59,7 +61,6 @@ func Newf(t typ, code Code, format string, args ...any) *base {
c: code,
m: fmt.Sprintf(format, args...),
e: nil,
s: newStackTrace(),
}
}
@@ -71,7 +72,6 @@ func Wrapf(cause error, t typ, code Code, format string, args ...any) *base {
c: code,
m: fmt.Sprintf(format, args...),
e: cause,
s: newStackTrace(),
}
}
@@ -82,17 +82,12 @@ func Wrap(cause error, t typ, code Code, message string) *base {
c: code,
m: message,
e: cause,
s: newStackTrace(),
}
}
// WithAdditionalf adds an additional error message to the existing error.
func WithAdditionalf(cause error, format string, args ...any) *base {
t, c, m, e, u, a := Unwrapb(cause)
var s stacktrace
if original, ok := cause.(*base); ok {
s = original.s
}
b := &base{
t: t,
c: c,
@@ -100,7 +95,6 @@ func WithAdditionalf(cause error, format string, args ...any) *base {
e: e,
u: u,
a: a,
s: s,
}
return b.WithAdditional(append(a, fmt.Sprintf(format, args...))...)
@@ -115,7 +109,6 @@ func (b *base) WithUrl(u string) *base {
e: b.e,
u: u,
a: b.a,
s: b.s,
}
}
@@ -128,7 +121,6 @@ func (b *base) WithAdditional(a ...string) *base {
e: b.e,
u: b.u,
a: a,
s: b.s,
}
}
@@ -231,8 +223,3 @@ func WrapTimeoutf(cause error, code Code, format string, args ...any) *base {
func NewTimeoutf(code Code, format string, args ...any) *base {
return Newf(TypeTimeout, code, format, args...)
}
// Attr returns an slog.Attr with a standardized "exception" key for the given error.
func Attr(err error) slog.Attr {
return slog.Any("exception", err)
}

View File

@@ -51,10 +51,3 @@ func TestUnwrapb(t *testing.T) {
atyp, _, _, _, _, _ = Unwrapb(oerr)
assert.Equal(t, TypeInternal, atyp)
}
func TestAttr(t *testing.T) {
err := New(TypeInternal, MustNewCode("test_code"), "test error")
attr := Attr(err)
assert.Equal(t, "exception", attr.Key)
assert.Equal(t, err, attr.Value.Any())
}

View File

@@ -1,38 +0,0 @@
package errors
import (
"fmt"
"runtime"
"strings"
)
// stacktrace holds a snapshot of program counters.
type stacktrace []uintptr
// newStackTrace captures a stack trace, skipping 3 frames to record the
// snapshot at the origin of the error:
// 1. runtime.Callers
// 2. newStackTrace
// 3. the constructor (New, Newf, Wrapf, Wrap)
//
// Inspired by https://github.com/thanos-io/thanos/blob/main/pkg/errors/stacktrace.go
func newStackTrace() stacktrace {
const depth = 16
pc := make([]uintptr, depth)
n := runtime.Callers(3, pc)
return stacktrace(pc[:n:n])
}
// String formats the stacktrace as function/file/line pairs.
func (s stacktrace) String() string {
var buf strings.Builder
frames := runtime.CallersFrames(s)
for {
frame, more := frames.Next()
fmt.Fprintf(&buf, "%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
if !more {
break
}
}
return buf.String()
}

View File

@@ -37,7 +37,7 @@ func NewRegistry(logger *slog.Logger, services ...NamedService) (*Registry, erro
}
return &Registry{
logger: logger.With(slog.String("pkg", "go.signoz.io/pkg/factory")),
logger: logger.With("pkg", "go.signoz.io/pkg/factory"),
services: m,
startCh: make(chan error, 1),
stopCh: make(chan error, len(services)),
@@ -47,7 +47,7 @@ func NewRegistry(logger *slog.Logger, services ...NamedService) (*Registry, erro
func (r *Registry) Start(ctx context.Context) {
for _, s := range r.services.GetInOrder() {
go func(s NamedService) {
r.logger.InfoContext(ctx, "starting service", slog.Any("service", s.Name()))
r.logger.InfoContext(ctx, "starting service", "service", s.Name())
err := s.Start(ctx)
r.startCh <- err
}(s)
@@ -61,11 +61,11 @@ func (r *Registry) Wait(ctx context.Context) error {
select {
case <-ctx.Done():
r.logger.InfoContext(ctx, "caught context error, exiting", errors.Attr(ctx.Err()))
r.logger.InfoContext(ctx, "caught context error, exiting", "error", ctx.Err())
case s := <-interrupt:
r.logger.InfoContext(ctx, "caught interrupt signal, exiting", slog.Any("signal", s))
r.logger.InfoContext(ctx, "caught interrupt signal, exiting", "signal", s)
case err := <-r.startCh:
r.logger.ErrorContext(ctx, "caught service error, exiting", errors.Attr(err))
r.logger.ErrorContext(ctx, "caught service error, exiting", "error", err)
return err
}
@@ -75,7 +75,7 @@ func (r *Registry) Wait(ctx context.Context) error {
func (r *Registry) Stop(ctx context.Context) error {
for _, s := range r.services.GetInOrder() {
go func(s NamedService) {
r.logger.InfoContext(ctx, "stopping service", slog.Any("service", s.Name()))
r.logger.InfoContext(ctx, "stopping service", "service", s.Name())
err := s.Stop(ctx)
r.stopCh <- err
}(s)

View File

@@ -35,7 +35,7 @@ type scoped struct {
func NewScopedProviderSettings(settings ProviderSettings, pkgName string) *scoped {
return &scoped{
logger: settings.Logger.With(slog.String("logger", pkgName)),
logger: settings.Logger.With("logger", pkgName),
meter: settings.MeterProvider.Meter(pkgName),
tracer: settings.TracerProvider.Tracer(pkgName),
prometheusRegisterer: settings.PrometheusRegisterer,

View File

@@ -2,7 +2,6 @@ package configflagger
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
@@ -35,7 +34,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "boolean"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "boolean")
continue
}
return nil, err
@@ -53,7 +52,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "string"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "string")
continue
}
return nil, err
@@ -71,7 +70,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "float"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "float")
continue
}
return nil, err
@@ -89,7 +88,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "integer"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "integer")
continue
}
return nil, err
@@ -107,7 +106,7 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", slog.String("name", name), slog.String("kind", "object"))
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "object")
continue
}
return nil, err

View File

@@ -2,13 +2,10 @@ package flagger
import (
"context"
"log/slog"
"github.com/open-feature/go-sdk/openfeature"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/open-feature/go-sdk/openfeature"
)
// Any feature flag provider has to implement this interface.
@@ -98,7 +95,7 @@ func (f *flagger) Boolean(ctx context.Context, flag featuretypes.Name, evalCtx f
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return false, err
}
@@ -106,7 +103,7 @@ func (f *flagger) Boolean(ctx context.Context, flag featuretypes.Name, evalCtx f
defaultValue, _, err := featuretypes.VariantValue[bool](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return false, err
}
@@ -115,7 +112,7 @@ func (f *flagger) Boolean(ctx context.Context, flag featuretypes.Name, evalCtx f
for _, client := range f.clients {
value, err := client.BooleanValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().ErrorContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -131,7 +128,7 @@ func (f *flagger) String(ctx context.Context, flag featuretypes.Name, evalCtx fe
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return "", err
}
@@ -139,7 +136,7 @@ func (f *flagger) String(ctx context.Context, flag featuretypes.Name, evalCtx fe
defaultValue, _, err := featuretypes.VariantValue[string](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return "", err
}
@@ -148,7 +145,7 @@ func (f *flagger) String(ctx context.Context, flag featuretypes.Name, evalCtx fe
for _, client := range f.clients {
value, err := client.StringValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -164,7 +161,7 @@ func (f *flagger) Float(ctx context.Context, flag featuretypes.Name, evalCtx fea
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return 0, err
}
@@ -172,7 +169,7 @@ func (f *flagger) Float(ctx context.Context, flag featuretypes.Name, evalCtx fea
defaultValue, _, err := featuretypes.VariantValue[float64](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return 0, err
}
@@ -181,7 +178,7 @@ func (f *flagger) Float(ctx context.Context, flag featuretypes.Name, evalCtx fea
for _, client := range f.clients {
value, err := client.FloatValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -197,7 +194,7 @@ func (f *flagger) Int(ctx context.Context, flag featuretypes.Name, evalCtx featu
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return 0, err
}
@@ -205,7 +202,7 @@ func (f *flagger) Int(ctx context.Context, flag featuretypes.Name, evalCtx featu
defaultValue, _, err := featuretypes.VariantValue[int64](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return 0, err
}
@@ -214,7 +211,7 @@ func (f *flagger) Int(ctx context.Context, flag featuretypes.Name, evalCtx featu
for _, client := range f.clients {
value, err := client.IntValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -230,7 +227,7 @@ func (f *flagger) Object(ctx context.Context, flag featuretypes.Name, evalCtx fe
// check if the feature is present in the default registry
feature, _, err := f.registry.Get(flag)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get feature from default registry", "error", err, "flag", flag)
return nil, err
}
@@ -238,7 +235,7 @@ func (f *flagger) Object(ctx context.Context, flag featuretypes.Name, evalCtx fe
defaultValue, _, err := featuretypes.VariantValue[any](feature, feature.DefaultVariant)
if err != nil {
// something which should never happen
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get default value from feature", "error", err, "flag", flag)
return nil, err
}
@@ -247,7 +244,7 @@ func (f *flagger) Object(ctx context.Context, flag featuretypes.Name, evalCtx fe
for _, client := range f.clients {
value, err := client.ObjectValue(ctx, flag.String(), defaultValue, evalCtx.Ctx())
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get value from client", errors.Attr(err), slog.Any("flag", flag), slog.String("client", client.Metadata().Name()))
f.settings.Logger().WarnContext(ctx, "failed to get value from client", "error", err, "flag", flag, "client", client.Metadata().Name)
continue
}
@@ -265,7 +262,7 @@ func (f *flagger) BooleanOrEmpty(ctx context.Context, flag featuretypes.Name, ev
defaultValue := false
value, err := f.Boolean(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -275,7 +272,7 @@ func (f *flagger) StringOrEmpty(ctx context.Context, flag featuretypes.Name, eva
defaultValue := ""
value, err := f.String(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -285,7 +282,7 @@ func (f *flagger) FloatOrEmpty(ctx context.Context, flag featuretypes.Name, eval
defaultValue := 0.0
value, err := f.Float(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -295,7 +292,7 @@ func (f *flagger) IntOrEmpty(ctx context.Context, flag featuretypes.Name, evalCt
defaultValue := int64(0)
value, err := f.Int(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -305,7 +302,7 @@ func (f *flagger) ObjectOrEmpty(ctx context.Context, flag featuretypes.Name, eva
defaultValue := struct{}{}
value, err := f.Object(ctx, flag, evalCtx)
if err != nil {
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", errors.Attr(err), slog.Any("flag", flag))
f.settings.Logger().ErrorContext(ctx, "failed to get value from flagger service", "error", err, "flag", flag)
return defaultValue
}
return value
@@ -339,7 +336,7 @@ func (f *flagger) List(ctx context.Context, evalCtx featuretypes.FlaggerEvaluati
for _, provider := range f.providers {
pFeatures, err := provider.List(ctx)
if err != nil {
f.settings.Logger().WarnContext(ctx, "failed to get features from provider", errors.Attr(err), slog.String("provider", provider.Metadata().Name))
f.settings.Logger().WarnContext(ctx, "failed to get features from provider", "error", err, "provider", provider.Metadata().Name)
continue
}

View File

@@ -9,8 +9,6 @@ import (
"github.com/gojek/heimdall/v7"
semconv "go.opentelemetry.io/otel/semconv/v1.27.0"
"github.com/SigNoz/signoz/pkg/errors"
)
type reqResLog struct {
@@ -53,7 +51,7 @@ func (plugin *reqResLog) OnRequestEnd(request *http.Request, response *http.Resp
bodybytes, err := io.ReadAll(response.Body)
if err != nil {
plugin.logger.DebugContext(request.Context(), "::UNABLE-TO-LOG-RESPONSE-BODY::", errors.Attr(err))
plugin.logger.DebugContext(request.Context(), "::UNABLE-TO-LOG-RESPONSE-BODY::", "error", err)
} else {
_ = response.Body.Close()
response.Body = io.NopCloser(bytes.NewBuffer(bodybytes))
@@ -71,7 +69,7 @@ func (plugin *reqResLog) OnRequestEnd(request *http.Request, response *http.Resp
func (plugin *reqResLog) OnError(request *http.Request, err error) {
host, port, _ := net.SplitHostPort(request.Host)
fields := []any{
errors.Attr(err),
"error", err,
string(semconv.HTTPRequestMethodKey), request.Method,
string(semconv.URLPathKey), request.URL.Path,
string(semconv.URLSchemeKey), request.URL.Scheme,

View File

@@ -42,7 +42,7 @@ func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
if claims.IdentNProvider == authtypes.IdentNProviderAPIKey.StringValue() {
if err := claims.IsViewer(); err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
@@ -67,7 +67,7 @@ func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
selectors,
)
if err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
if errors.Asc(err, authtypes.ErrCodeAuthZForbidden) {
render.Error(rw, errors.New(errors.TypeForbidden, authtypes.ErrCodeAuthZForbidden, "only viewers/editors/admins can access this resource"))
return
@@ -92,7 +92,7 @@ func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
if claims.IdentNProvider == authtypes.IdentNProviderAPIKey.StringValue() {
if err := claims.IsEditor(); err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
@@ -116,7 +116,7 @@ func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
selectors,
)
if err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
if errors.Asc(err, authtypes.ErrCodeAuthZForbidden) {
render.Error(rw, errors.New(errors.TypeForbidden, authtypes.ErrCodeAuthZForbidden, "only editors/admins can access this resource"))
return
@@ -141,7 +141,7 @@ func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
if claims.IdentNProvider == authtypes.IdentNProviderAPIKey.StringValue() {
if err := claims.IsAdmin(); err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
@@ -164,7 +164,7 @@ func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
selectors,
)
if err != nil {
middleware.logger.WarnContext(ctx, authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(ctx, authzDeniedMessage, "claims", claims)
if errors.Asc(err, authtypes.ErrCodeAuthZForbidden) {
render.Error(rw, errors.New(errors.TypeForbidden, authtypes.ErrCodeAuthZForbidden, "only admins can access this resource"))
return
@@ -188,7 +188,7 @@ func (middleware *AuthZ) SelfAccess(next http.HandlerFunc) http.HandlerFunc {
id := mux.Vars(req)["id"]
if err := claims.IsSelfAccess(id); err != nil {
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, slog.Any("claims", claims))
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}

View File

@@ -5,7 +5,6 @@ import (
"log/slog"
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/sharder"
"github.com/SigNoz/signoz/pkg/types"
@@ -53,7 +52,7 @@ func (m *IdentN) Wrap(next http.Handler) http.Handler {
ctx := r.Context()
claims := identity.ToClaims()
if err := m.sharder.IsMyOwnedKey(ctx, types.NewOrganizationKey(valuer.MustNewUUID(claims.OrgID))); err != nil {
m.logger.ErrorContext(ctx, identityCrossOrgMessage, slog.Any("claims", claims), errors.Attr(err))
m.logger.ErrorContext(ctx, identityCrossOrgMessage, "claims", claims, "error", err)
next.ServeHTTP(w, r)
return
}

View File

@@ -9,8 +9,6 @@ import (
"github.com/gorilla/mux"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"github.com/SigNoz/signoz/pkg/errors"
)
const (
@@ -29,7 +27,7 @@ func NewLogging(logger *slog.Logger, excludedRoutes []string) *Logging {
}
return &Logging{
logger: logger.With(slog.String("pkg", pkgname)),
logger: logger.With("pkg", pkgname),
excludedRoutes: excludedRoutesMap,
}
}
@@ -67,7 +65,7 @@ func (middleware *Logging) Wrap(next http.Handler) http.Handler {
string(semconv.HTTPServerRequestDurationName), time.Since(start),
)
if err != nil {
fields = append(fields, errors.Attr(err))
fields = append(fields, "error", err)
middleware.logger.ErrorContext(req.Context(), logMessage, fields...)
} else {
// when the status code is 400 or >=500, and the response body is not empty.

View File

@@ -6,8 +6,6 @@ import (
"net/http"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
)
const (
@@ -38,7 +36,7 @@ func NewTimeout(logger *slog.Logger, excludedRoutes []string, defaultTimeout tim
}
return &Timeout{
logger: logger.With(slog.String("pkg", pkgname)),
logger: logger.With("pkg", pkgname),
excluded: excluded,
defaultTimeout: defaultTimeout,
maxTimeout: maxTimeout,
@@ -53,7 +51,7 @@ func (middleware *Timeout) Wrap(next http.Handler) http.Handler {
if incoming != "" {
parsed, err := time.ParseDuration(strings.TrimSpace(incoming) + "s")
if err != nil {
middleware.logger.WarnContext(req.Context(), "cannot parse timeout in header, using default timeout", slog.String("timeout", incoming), errors.Attr(err))
middleware.logger.WarnContext(req.Context(), "cannot parse timeout in header, using default timeout", "timeout", incoming, "error", err)
} else {
if parsed > middleware.maxTimeout {
actual = middleware.maxTimeout

View File

@@ -38,17 +38,17 @@ func New(logger *slog.Logger, cfg Config, handler http.Handler) (*Server, error)
return &Server{
srv: srv,
logger: logger.With(slog.String("pkg", "go.signoz.io/pkg/http/server")),
logger: logger.With("pkg", "go.signoz.io/pkg/http/server"),
handler: handler,
cfg: cfg,
}, nil
}
func (server *Server) Start(ctx context.Context) error {
server.logger.InfoContext(ctx, "starting http server", slog.String("address", server.srv.Addr))
server.logger.InfoContext(ctx, "starting http server", "address", server.srv.Addr)
if err := server.srv.ListenAndServe(); err != nil {
if err != http.ErrServerClosed {
server.logger.ErrorContext(ctx, "failed to start server", errors.Attr(err))
server.logger.ErrorContext(ctx, "failed to start server", "error", err)
return err
}
}
@@ -60,7 +60,7 @@ func (server *Server) Stop(ctx context.Context) error {
defer cancel()
if err := server.srv.Shutdown(ctx); err != nil {
server.logger.ErrorContext(ctx, "failed to stop server", errors.Attr(err))
server.logger.ErrorContext(ctx, "failed to stop server", "error", err)
return err
}

View File

@@ -5,14 +5,13 @@ import (
"net/http"
"time"
"golang.org/x/sync/singleflight"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"golang.org/x/sync/singleflight"
)
// todo: will move this in types layer with service account integration
@@ -119,7 +118,7 @@ func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes
Where("revoked = false").
Exec(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to update last used of api key", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to update last used of api key", "error", err)
}
return true, nil
})

View File

@@ -79,16 +79,18 @@ func (provider *provider) GetIdentity(req *http.Request) (*authtypes.Identity, e
return nil, err
}
rootUser, err := provider.userGetter.GetRootUserByOrgID(ctx, org.ID)
rootUser, userRoles, err := provider.userGetter.GetRootUserByOrgID(ctx, org.ID)
if err != nil {
return nil, err
}
role := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
provider.identity = authtypes.NewIdentity(
rootUser.ID,
rootUser.OrgID,
rootUser.Email,
rootUser.Role,
role,
authtypes.IdentNProviderImpersonation,
)

View File

@@ -2,7 +2,6 @@ package identn
import (
"context"
"log/slog"
"net/http"
"github.com/SigNoz/signoz/pkg/factory"
@@ -70,7 +69,7 @@ func NewIdentNResolver(ctx context.Context, providerSettings factory.ProviderSet
func (c *identNResolver) GetIdentN(r *http.Request) IdentN {
for _, idn := range c.identNs {
if idn.Test(r) {
c.settings.Logger().DebugContext(r.Context(), "identN matched", slog.Any("provider", idn.Name()))
c.settings.Logger().DebugContext(r.Context(), "identN matched", "provider", idn.Name())
return idn
}
}

View File

@@ -6,13 +6,11 @@ import (
"strings"
"time"
"golang.org/x/sync/singleflight"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/tokenizer"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"golang.org/x/sync/singleflight"
)
type provider struct {
@@ -83,7 +81,7 @@ func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes
_, _, _ = provider.sfGroup.Do(accessToken, func() (any, error) {
if err := provider.tokenizer.SetLastObservedAt(ctx, accessToken, time.Now()); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to set last observed at", errors.Attr(err))
provider.settings.Logger().ErrorContext(ctx, "failed to set last observed at", "error", err)
return false, err
}
return true, nil

View File

@@ -10,10 +10,16 @@ import (
type zapToSlogConverter struct{}
func NewLogger(config Config) *slog.Logger {
func NewLogger(config Config, wrappers ...loghandler.Wrapper) *slog.Logger {
logger := slog.New(
loghandler.New(
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: config.Logs.Level, ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: config.Logs.Level, AddSource: true, ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
// This is more in line with OpenTelemetry semantic conventions
if a.Key == slog.SourceKey {
a.Key = "code"
return a
}
if a.Key == slog.TimeKey {
a.Key = "timestamp"
return a
@@ -21,10 +27,7 @@ func NewLogger(config Config) *slog.Logger {
return a
}}),
loghandler.NewSource(),
loghandler.NewCorrelation(),
loghandler.NewFiltering(),
loghandler.NewException(),
wrappers...,
),
)

View File

@@ -1,53 +0,0 @@
package loghandler
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
)
// exception is a LogHandler wrapper that promotes an error attached under the
// "exception" attribute key into OpenTelemetry-style exception.type,
// exception.code and exception.message attributes (plus exception.stacktrace
// when the error carries one).
type exception struct{}

// NewException returns a new exception log-handler wrapper.
func NewException() *exception {
	return &exception{}
}

// Wrap returns a LogHandler that rewrites records carrying an error under the
// "exception" key; records without such an error are forwarded unchanged.
func (h *exception) Wrap(next LogHandler) LogHandler {
	return LogHandlerFunc(func(ctx context.Context, record slog.Record) error {
		var foundErr error

		// Rebuild the record, copying every attribute except the raw
		// "exception" error, which is re-emitted in expanded form below.
		newRecord := slog.NewRecord(record.Time, record.Level, record.Message, record.PC)
		record.Attrs(func(a slog.Attr) bool {
			if a.Key == "exception" {
				if err, ok := a.Value.Any().(error); ok {
					foundErr = err
					return true // drop the raw attr from the copy
				}
				// An "exception" attr whose value is not an error falls
				// through and is kept as-is on the new record.
			}
			newRecord.AddAttrs(a)
			return true
		})

		// No error attached: pass the original record through untouched.
		if foundErr == nil {
			return next.Handle(ctx, record)
		}

		t, c, m, _, _, _ := errors.Unwrapb(foundErr)
		newRecord.AddAttrs(
			slog.String("exception.type", t.String()),
			slog.String("exception.code", c.String()),
			slog.String("exception.message", m),
		)

		// Use the stacktrace captured at error creation time if available.
		type stacktracer interface {
			Stacktrace() string
		}
		if st, ok := foundErr.(stacktracer); ok && st.Stacktrace() != "" {
			newRecord.AddAttrs(slog.String("exception.stacktrace", st.Stacktrace()))
		}

		return next.Handle(ctx, newRecord)
	})
}

View File

@@ -1,100 +0,0 @@
package loghandler
import (
"bytes"
"context"
"encoding/json"
"log/slog"
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestException verifies that the exception wrapper expands an error logged
// under the "exception" key into exception.type / exception.code /
// exception.message (and a stacktrace pointing at the error's creation site),
// and that records without such an error gain no exception.* attributes.
func TestException(t *testing.T) {
	testCases := []struct {
		name             string
		attrs            []slog.Attr
		exceptionType    string // expected exception.type in the JSON output
		exceptionCode    string // expected exception.code in the JSON output
		exceptionMessage string // expected exception.message in the JSON output
		hasException     bool   // whether exception.* attrs should be present at all
	}{
		{
			name: "PkgError",
			attrs: []slog.Attr{
				errors.Attr(errors.New(errors.TypeNotFound, errors.MustNewCode("test_code"), "resource not found")),
			},
			exceptionType:    "not-found",
			exceptionCode:    "test_code",
			exceptionMessage: "resource not found",
			hasException:     true,
		},
		{
			name: "StdlibError",
			attrs: []slog.Attr{
				errors.Attr(errors.Newf(errors.TypeInternal, errors.MustNewCode("internal"), "something went wrong")),
			},
			exceptionType:    "internal",
			exceptionCode:    "internal",
			exceptionMessage: "something went wrong",
			hasException:     true,
		},
		{
			// Wrapping replaces type/code/message with the outermost layer's values.
			name: "WrappedPkgError",
			attrs: []slog.Attr{
				errors.Attr(errors.Wrapf(errors.New(errors.TypeNotFound, errors.MustNewCode("not_found"), "db connection failed"), errors.TypeInternal, errors.MustNewCode("db_error"), "failed to fetch user")),
			},
			exceptionType:    "internal",
			exceptionCode:    "db_error",
			exceptionMessage: "failed to fetch user",
			hasException:     true,
		},
		{
			// No error attr at all: the record must pass through untouched.
			name: "NoError",
			attrs: []slog.Attr{
				slog.String("key", "value"),
			},
			hasException: false,
		},
		{
			// An "exception" key holding a non-error value must not be expanded.
			name: "ExceptionKeyWithNonError",
			attrs: []slog.Attr{
				slog.String("exception", "not an error type"),
			},
			hasException: false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Capture JSON output from a handler chain containing only the
			// exception wrapper.
			buf := bytes.NewBuffer(nil)
			logger := slog.New(&handler{
				base:     slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}),
				wrappers: []Wrapper{NewException()},
			})

			logger.LogAttrs(context.Background(), slog.LevelError, "operation failed", tc.attrs...)

			m := make(map[string]any)
			err := json.Unmarshal(buf.Bytes(), &m)
			require.NoError(t, err)

			assert.Equal(t, "operation failed", m["msg"])

			if tc.hasException {
				assert.Equal(t, tc.exceptionType, m["exception.type"])
				assert.Equal(t, tc.exceptionCode, m["exception.code"])
				assert.Equal(t, tc.exceptionMessage, m["exception.message"])

				// The stacktrace is captured at error creation time, so it
				// must reference this test file.
				stacktrace, ok := m["exception.stacktrace"].(string)
				require.True(t, ok)
				assert.Contains(t, stacktrace, "exception_test.go:")
			} else {
				assert.Nil(t, m["exception.type"])
				assert.Nil(t, m["exception.code"])
				assert.Nil(t, m["exception.message"])
				assert.Nil(t, m["exception.stacktrace"])
			}
		})
	}
}

View File

@@ -18,7 +18,7 @@ func TestFiltering_SuppressesContextCanceled(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{filtering}})
logger.ErrorContext(context.Background(), "operation failed", slog.Any("error", context.Canceled))
logger.ErrorContext(context.Background(), "operation failed", "error", context.Canceled)
assert.Empty(t, buf.String(), "log with context.Canceled should be suppressed")
}
@@ -29,7 +29,7 @@ func TestFiltering_AllowsOtherErrors(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{filtering}})
logger.ErrorContext(context.Background(), "operation failed", slog.Any("error", errors.New(errors.TypeInternal, errors.CodeInternal, "some other error")))
logger.ErrorContext(context.Background(), "operation failed", "error", errors.New(errors.TypeInternal, errors.CodeInternal, "some other error"))
m := make(map[string]any)
err := json.Unmarshal(buf.Bytes(), &m)
@@ -43,7 +43,7 @@ func TestFiltering_AllowsLogsWithoutErrors(t *testing.T) {
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{filtering}})
logger.InfoContext(context.Background(), "normal log", slog.String("key", "value"))
logger.InfoContext(context.Background(), "normal log", "key", "value")
m := make(map[string]any)
err := json.Unmarshal(buf.Bytes(), &m)

View File

@@ -1,28 +0,0 @@
package loghandler
import (
"context"
"log/slog"
"runtime"
)
// source is a LogHandler wrapper that annotates each record with the file,
// function, and line of the call site recorded in the record's program counter.
type source struct{}

// NewSource returns a new source log-handler wrapper.
func NewSource() *source {
	return &source{}
}

// Wrap returns a LogHandler that resolves record.PC (when present) into
// code.filepath, code.function and code.lineno attributes before delegating
// to next.
func (h *source) Wrap(next LogHandler) LogHandler {
	return LogHandlerFunc(func(ctx context.Context, record slog.Record) error {
		if record.PC == 0 {
			// No program counter captured; nothing to resolve.
			return next.Handle(ctx, record)
		}

		frames := runtime.CallersFrames([]uintptr{record.PC})
		frame, _ := frames.Next()
		record.AddAttrs(
			slog.String("code.filepath", frame.File),
			slog.String("code.function", frame.Function),
			slog.Int("code.lineno", frame.Line),
		)
		return next.Handle(ctx, record)
	})
}

View File

@@ -1,37 +0,0 @@
package loghandler
import (
"bytes"
"context"
"encoding/json"
"log/slog"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSource(t *testing.T) {
src := NewSource()
buf := bytes.NewBuffer(nil)
logger := slog.New(&handler{base: slog.NewJSONHandler(buf, &slog.HandlerOptions{Level: slog.LevelDebug}), wrappers: []Wrapper{src}})
logger.InfoContext(context.Background(), "test")
m := make(map[string]any)
err := json.Unmarshal(buf.Bytes(), &m)
require.NoError(t, err)
assert.Contains(t, m, "code.filepath")
assert.Contains(t, m, "code.function")
assert.Contains(t, m, "code.lineno")
assert.Contains(t, m["code.filepath"], "source_test.go")
assert.Contains(t, m["code.function"], "TestSource")
assert.NotZero(t, m["code.lineno"])
// Ensure the nested "source" key is not present.
assert.NotContains(t, m, "source")
assert.NotContains(t, m, "code")
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/instrumentation/loghandler"
"github.com/SigNoz/signoz/pkg/version"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
@@ -115,7 +116,7 @@ func New(ctx context.Context, cfg Config, build version.Build, serviceName strin
meterProvider: meterProvider,
meterProviderShutdownFunc: meterProviderShutdownFunc,
prometheusRegistry: prometheusRegistry,
logger: NewLogger(cfg),
logger: NewLogger(cfg, loghandler.NewCorrelation(), loghandler.NewFiltering()),
startCh: make(chan struct{}),
}, nil
}

View File

@@ -2,7 +2,6 @@ package impldashboard
import (
"context"
"log/slog"
"slices"
"github.com/SigNoz/signoz/pkg/analytics"
@@ -309,7 +308,7 @@ func (module *module) checkClickHouseQueriesForMetricNames(ctx context.Context,
result, err := module.queryParser.AnalyzeQueryFilter(ctx, qbtypes.QueryTypeClickHouseSQL, queryStr)
if err != nil {
// Log warning and continue - parsing errors shouldn't break the search
module.settings.Logger().WarnContext(ctx, "failed to parse ClickHouse query", slog.String("query", queryStr), errors.Attr(err))
module.settings.Logger().WarnContext(ctx, "failed to parse ClickHouse query", "query", queryStr, "error", err)
continue
}
@@ -344,7 +343,7 @@ func (module *module) checkPromQLQueriesForMetricNames(ctx context.Context, quer
result, err := module.queryParser.AnalyzeQueryFilter(ctx, qbtypes.QueryTypePromQL, queryStr)
if err != nil {
// Log warning and continue - parsing errors shouldn't break the search
module.settings.Logger().WarnContext(ctx, "failed to parse PromQL query", slog.String("query", queryStr), errors.Attr(err))
module.settings.Logger().WarnContext(ctx, "failed to parse PromQL query", "query", queryStr, "error", err)
continue
}

View File

@@ -8,9 +8,6 @@ import (
"strings"
"time"
sqlbuilder "github.com/huandu/go-sqlbuilder"
"golang.org/x/sync/errgroup"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
@@ -28,6 +25,8 @@ import (
"github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
sqlbuilder "github.com/huandu/go-sqlbuilder"
"golang.org/x/sync/errgroup"
)
type module struct {
@@ -511,7 +510,7 @@ func (m *module) fetchMetadataFromCache(ctx context.Context, orgID valuer.UUID,
if err := m.cache.Get(ctx, orgID, cacheKey, &cachedMetadata); err == nil {
hits[metricName] = &cachedMetadata
} else {
m.logger.WarnContext(ctx, "cache miss for metric metadata", slog.String("metric_name", metricName), errors.Attr(err))
m.logger.WarnContext(ctx, "cache miss for metric metadata", "metric_name", metricName, "error", err)
misses = append(misses, metricName)
}
}
@@ -567,7 +566,7 @@ func (m *module) fetchUpdatedMetadata(ctx context.Context, orgID valuer.UUID, me
cacheKey := generateMetricMetadataCacheKey(metricName)
if err := m.cache.Set(ctx, orgID, cacheKey, &metricMetadata, 0); err != nil {
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", slog.String("metric_name", metricName), errors.Attr(err))
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", "metric_name", metricName, "error", err)
}
}
@@ -627,7 +626,7 @@ func (m *module) fetchTimeseriesMetadata(ctx context.Context, orgID valuer.UUID,
cacheKey := generateMetricMetadataCacheKey(metricName)
if err := m.cache.Set(ctx, orgID, cacheKey, &metricMetadata, 0); err != nil {
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", slog.String("metric_name", metricName), errors.Attr(err))
m.logger.WarnContext(ctx, "failed to set metric metadata in cache", "metric_name", metricName, "error", err)
}
}
@@ -766,7 +765,7 @@ func (m *module) insertMetricsMetadata(ctx context.Context, orgID valuer.UUID, r
}
cacheKey := generateMetricMetadataCacheKey(req.MetricName)
if err := m.cache.Set(ctx, orgID, cacheKey, metricMetadata, 0); err != nil {
m.logger.WarnContext(ctx, "failed to set metric metadata in cache after insert", slog.String("metric_name", req.MetricName), errors.Attr(err))
m.logger.WarnContext(ctx, "failed to set metric metadata in cache after insert", "metric_name", req.MetricName, "error", err)
}
return nil

View File

@@ -273,7 +273,7 @@ func (module *module) CreateFactorAPIKey(ctx context.Context, factorAPIKey *serv
"KeyID": factorAPIKey.ID.String(),
"KeyCreatedAt": factorAPIKey.CreatedAt.String(),
}); err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to send email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send email", "error", err)
}
return nil
@@ -328,7 +328,7 @@ func (module *module) RevokeFactorAPIKey(ctx context.Context, serviceAccountID v
"KeyID": factorAPIKey.ID.String(),
"KeyCreatedAt": factorAPIKey.CreatedAt.String(),
}); err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to send email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send email", "error", err)
}
return nil

View File

@@ -2,7 +2,6 @@ package implsession
import (
"context"
"log/slog"
"net/url"
"slices"
"strings"
@@ -24,18 +23,18 @@ import (
type module struct {
settings factory.ScopedProviderSettings
authNs map[authtypes.AuthNProvider]authn.AuthN
user user.Module
userSetter user.Setter
userGetter user.Getter
authDomain authdomain.Module
tokenizer tokenizer.Tokenizer
orgGetter organization.Getter
}
func NewModule(providerSettings factory.ProviderSettings, authNs map[authtypes.AuthNProvider]authn.AuthN, user user.Module, userGetter user.Getter, authDomain authdomain.Module, tokenizer tokenizer.Tokenizer, orgGetter organization.Getter) session.Module {
func NewModule(providerSettings factory.ProviderSettings, authNs map[authtypes.AuthNProvider]authn.AuthN, userSetter user.Setter, userGetter user.Getter, authDomain authdomain.Module, tokenizer tokenizer.Tokenizer, orgGetter organization.Getter) session.Module {
return &module{
settings: factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/modules/session/implsession"),
authNs: authNs,
user: user,
userSetter: userSetter,
userGetter: userGetter,
authDomain: authDomain,
tokenizer: tokenizer,
@@ -133,7 +132,7 @@ func (module *module) CreateCallbackAuthNSession(ctx context.Context, authNProvi
callbackIdentity, err := callbackAuthN.HandleCallback(ctx, values)
if err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to handle callback", errors.Attr(err), slog.Any("authn_provider", authNProvider))
module.settings.Logger().ErrorContext(ctx, "failed to handle callback", "error", err, "authn_provider", authNProvider)
return "", err
}
@@ -143,23 +142,35 @@ func (module *module) CreateCallbackAuthNSession(ctx context.Context, authNProvi
}
roleMapping := authDomain.AuthDomainConfig().RoleMapping
role := roleMapping.NewRoleFromCallbackIdentity(callbackIdentity)
role, isAuthoritative := roleMapping.NewRoleFromCallbackIdentity(callbackIdentity)
user, err := types.NewUser(callbackIdentity.Name, callbackIdentity.Email, role, callbackIdentity.OrgID, types.UserStatusActive)
newUser, err := types.NewUser(callbackIdentity.Name, callbackIdentity.Email, callbackIdentity.OrgID, types.UserStatusActive)
if err != nil {
return "", err
}
user, err = module.user.GetOrCreateUser(ctx, user)
if isAuthoritative {
signozManagedRole := authtypes.MustGetSigNozManagedRoleFromExistingRole(role)
newUser, err = module.userSetter.GetOrCreateUser(ctx, newUser, user.WithRoleNames([]string{signozManagedRole}))
} else {
newUser, err = module.userSetter.GetOrCreateUser(ctx, newUser)
}
if err != nil {
return "", err
}
if err := user.ErrIfRoot(); err != nil {
if err := newUser.ErrIfRoot(); err != nil {
return "", errors.WithAdditionalf(err, "root user can only authenticate via password")
}
token, err := module.tokenizer.CreateToken(ctx, authtypes.NewIdentity(user.ID, user.OrgID, user.Email, user.Role, authtypes.IdentNProviderTokenizer), map[string]string{})
userRoles, err := module.userGetter.GetUserRoles(ctx, newUser.ID)
if err != nil {
return "", err
}
finalRole := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
token, err := module.tokenizer.CreateToken(ctx, authtypes.NewIdentity(newUser.ID, newUser.OrgID, newUser.Email, finalRole, authtypes.IdentNProviderTokenizer), map[string]string{})
if err != nil {
return "", err
}

View File

@@ -4,27 +4,40 @@ import (
"context"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type getter struct {
store types.UserStore
flagger flagger.Flagger
store types.UserStore
userRoleStore authtypes.UserRoleStore
flagger flagger.Flagger
}
func NewGetter(store types.UserStore, flagger flagger.Flagger) user.Getter {
return &getter{store: store, flagger: flagger}
func NewGetter(store types.UserStore, userRoleStore authtypes.UserRoleStore, flagger flagger.Flagger) user.Getter {
return &getter{store: store, userRoleStore: userRoleStore, flagger: flagger}
}
func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
return module.store.GetRootUserByOrgID(ctx, orgID)
// GetRootUserByOrgID fetches the root user of the given organization together
// with all roles assigned to that user.
func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, []*authtypes.UserRole, error) {
	root, err := module.store.GetRootUserByOrgID(ctx, orgID)
	if err != nil {
		return nil, nil, err
	}

	roles, err := module.userRoleStore.GetUserRolesByUserID(ctx, root.ID)
	if err != nil {
		return nil, nil, err
	}

	return root, roles, nil
}
func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.User, error) {
func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.DeprecatedUser, error) {
users, err := module.store.ListUsersByOrgID(ctx, orgID)
if err != nil {
return nil, err
@@ -38,43 +51,68 @@ func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*ty
users = slices.DeleteFunc(users, func(user *types.User) bool { return user.IsRoot })
}
return users, nil
}
userIDs := make([]valuer.UUID, len(users))
for idx, user := range users {
userIDs[idx] = user.ID
}
func (module *getter) GetUsersByEmail(ctx context.Context, email valuer.Email) ([]*types.User, error) {
users, err := module.store.GetUsersByEmail(ctx, email)
userRoles, err := module.userRoleStore.ListUserRolesByOrgIDAndUserIDs(ctx, orgID, userIDs)
if err != nil {
return nil, err
}
return users, nil
// Build userID → role name mapping directly from the joined Role
userIDToRoleNames := make(map[valuer.UUID][]string)
for _, ur := range userRoles {
if ur.Role != nil {
userIDToRoleNames[ur.UserID] = append(userIDToRoleNames[ur.UserID], ur.Role.Name)
}
}
deprecatedUsers := make([]*types.DeprecatedUser, 0, len(users))
for _, user := range users {
roleNames := userIDToRoleNames[user.ID]
role := authtypes.SigNozManagedRoleToExistingLegacyRole[roleNames[0]]
deprecatedUsers = append(deprecatedUsers, types.NewDeprecatedUserFromUserAndRole(user, role))
}
return deprecatedUsers, nil
}
func (module *getter) GetByOrgIDAndID(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*types.User, error) {
func (module *getter) GetDeprecatedUserByOrgIDAndID(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*types.DeprecatedUser, error) {
user, err := module.store.GetByOrgIDAndID(ctx, orgID, id)
if err != nil {
return nil, err
}
return user, nil
userRoles, err := module.GetUserRoles(ctx, id)
if err != nil {
return nil, err
}
role := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
return types.NewDeprecatedUserFromUserAndRole(user, role), nil
}
func (module *getter) Get(ctx context.Context, id valuer.UUID) (*types.User, error) {
func (module *getter) Get(ctx context.Context, id valuer.UUID) (*types.DeprecatedUser, error) {
user, err := module.store.GetUser(ctx, id)
if err != nil {
return nil, err
}
return user, nil
}
func (module *getter) ListUsersByEmailAndOrgIDs(ctx context.Context, email valuer.Email, orgIDs []valuer.UUID) ([]*types.User, error) {
users, err := module.store.ListUsersByEmailAndOrgIDs(ctx, email, orgIDs)
userRoles, err := module.GetUserRoles(ctx, id)
if err != nil {
return nil, err
}
return users, nil
role := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
return types.NewDeprecatedUserFromUserAndRole(user, role), nil
}
func (module *getter) ListUsersByEmailAndOrgIDs(ctx context.Context, email valuer.Email, orgIDs []valuer.UUID) ([]*types.User, error) {
return module.store.ListUsersByEmailAndOrgIDs(ctx, email, orgIDs)
}
func (module *getter) CountByOrgID(ctx context.Context, orgID valuer.UUID) (int64, error) {
@@ -103,3 +141,34 @@ func (module *getter) GetFactorPasswordByUserID(ctx context.Context, userID valu
return factorPassword, nil
}
// GetNonDeletedUserByEmailAndOrgID enforces that at most one non-deleted user
// with the given email exists in the organization. It returns that user, a
// not-found error when none exists, or an internal error when more than one
// non-deleted user shares the email within the org.
func (module *getter) GetNonDeletedUserByEmailAndOrgID(ctx context.Context, email valuer.Email, orgID valuer.UUID) (*types.User, error) {
	matches, err := module.store.GetUsersByEmailAndOrgID(ctx, email, orgID)
	if err != nil {
		return nil, err
	}

	// Drop users that are marked deleted; only live accounts count.
	matches = slices.DeleteFunc(matches, func(user *types.User) bool { return user.ErrIfDeleted() != nil })

	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return nil, errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "No non-deleted user found with email %s in org_id: %s", email.StringValue(), orgID.StringValue())
	default:
		return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "Multiple non-deleted users found for email %s in org_id: %s", email.StringValue(), orgID.StringValue())
	}
}
// GetUserRoles returns every role assigned to the given user, delegating
// directly to the user-role store.
func (module *getter) GetUserRoles(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error) {
	return module.userRoleStore.GetUserRolesByUserID(ctx, userID)
}

View File

@@ -19,12 +19,12 @@ import (
)
type handler struct {
module root.Module
setter root.Setter
getter root.Getter
}
func NewHandler(module root.Module, getter root.Getter) root.Handler {
return &handler{module: module, getter: getter}
func NewHandler(setter root.Setter, getter root.Getter) root.Handler {
return &handler{setter: setter, getter: getter}
}
func (h *handler) CreateInvite(rw http.ResponseWriter, r *http.Request) {
@@ -43,7 +43,7 @@ func (h *handler) CreateInvite(rw http.ResponseWriter, r *http.Request) {
return
}
invites, err := h.module.CreateBulkInvite(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID), &types.PostableBulkInviteRequest{
invites, err := h.setter.CreateBulkInvite(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID), &types.PostableBulkInviteRequest{
Invites: []types.PostableInvite{req},
})
if err != nil {
@@ -76,7 +76,7 @@ func (h *handler) CreateBulkInvite(rw http.ResponseWriter, r *http.Request) {
return
}
_, err = h.module.CreateBulkInvite(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID), &req)
_, err = h.setter.CreateBulkInvite(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID), &req)
if err != nil {
render.Error(rw, err)
return
@@ -97,7 +97,7 @@ func (h *handler) GetUser(w http.ResponseWriter, r *http.Request) {
return
}
user, err := h.getter.GetByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(id))
user, err := h.getter.GetDeprecatedUserByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(id))
if err != nil {
render.Error(w, err)
return
@@ -116,7 +116,7 @@ func (h *handler) GetMyUser(w http.ResponseWriter, r *http.Request) {
return
}
user, err := h.getter.GetByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID))
user, err := h.getter.GetDeprecatedUserByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID))
if err != nil {
render.Error(w, err)
return
@@ -156,13 +156,13 @@ func (h *handler) UpdateUser(w http.ResponseWriter, r *http.Request) {
return
}
var user types.User
user := types.DeprecatedUser{User: &types.User{}}
if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
render.Error(w, err)
return
}
updatedUser, err := h.module.UpdateUser(ctx, valuer.MustNewUUID(claims.OrgID), id, &user, claims.UserID)
updatedUser, err := h.setter.UpdateUser(ctx, valuer.MustNewUUID(claims.OrgID), id, &user, claims.UserID)
if err != nil {
render.Error(w, err)
return
@@ -183,7 +183,7 @@ func (h *handler) DeleteUser(w http.ResponseWriter, r *http.Request) {
return
}
if err := h.module.DeleteUser(ctx, valuer.MustNewUUID(claims.OrgID), id, claims.UserID); err != nil {
if err := h.setter.DeleteUser(ctx, valuer.MustNewUUID(claims.OrgID), id, claims.UserID); err != nil {
render.Error(w, err)
return
}
@@ -203,13 +203,13 @@ func (handler *handler) GetResetPasswordToken(w http.ResponseWriter, r *http.Req
return
}
user, err := handler.getter.GetByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(id))
user, err := handler.getter.GetDeprecatedUserByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(id))
if err != nil {
render.Error(w, err)
return
}
token, err := handler.module.GetOrCreateResetPasswordToken(ctx, user.ID)
token, err := handler.setter.GetOrCreateResetPasswordToken(ctx, user.ID)
if err != nil {
render.Error(w, err)
return
@@ -228,7 +228,7 @@ func (handler *handler) ResetPassword(w http.ResponseWriter, r *http.Request) {
return
}
err := handler.module.UpdatePasswordByResetPasswordToken(ctx, req.Token, req.Password)
err := handler.setter.UpdatePasswordByResetPasswordToken(ctx, req.Token, req.Password)
if err != nil {
render.Error(w, err)
return
@@ -247,7 +247,7 @@ func (handler *handler) ChangePassword(w http.ResponseWriter, r *http.Request) {
return
}
err := handler.module.UpdatePassword(ctx, req.UserID, req.OldPassword, req.NewPassword)
err := handler.setter.UpdatePassword(ctx, req.UserID, req.OldPassword, req.NewPassword)
if err != nil {
render.Error(w, err)
return
@@ -266,7 +266,7 @@ func (h *handler) ForgotPassword(w http.ResponseWriter, r *http.Request) {
return
}
err := h.module.ForgotPassword(ctx, req.OrgID, req.Email, req.FrontendBaseURL)
err := h.setter.ForgotPassword(ctx, req.OrgID, req.Email, req.FrontendBaseURL)
if err != nil {
render.Error(w, err)
return
@@ -302,13 +302,13 @@ func (h *handler) CreateAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
err = h.module.CreateAPIKey(ctx, apiKey)
err = h.setter.CreateAPIKey(ctx, apiKey)
if err != nil {
render.Error(w, err)
return
}
createdApiKey, err := h.module.GetAPIKey(ctx, valuer.MustNewUUID(claims.OrgID), apiKey.ID)
createdApiKey, err := h.setter.GetAPIKey(ctx, valuer.MustNewUUID(claims.OrgID), apiKey.ID)
if err != nil {
render.Error(w, err)
return
@@ -328,7 +328,7 @@ func (h *handler) ListAPIKeys(w http.ResponseWriter, r *http.Request) {
return
}
apiKeys, err := h.module.ListAPIKeys(ctx, valuer.MustNewUUID(claims.OrgID))
apiKeys, err := h.setter.ListAPIKeys(ctx, valuer.MustNewUUID(claims.OrgID))
if err != nil {
render.Error(w, err)
return
@@ -373,7 +373,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
}
//get the API Key
existingAPIKey, err := h.module.GetAPIKey(ctx, valuer.MustNewUUID(claims.OrgID), id)
existingAPIKey, err := h.setter.GetAPIKey(ctx, valuer.MustNewUUID(claims.OrgID), id)
if err != nil {
render.Error(w, err)
return
@@ -391,7 +391,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
err = h.module.UpdateAPIKey(ctx, id, &req, valuer.MustNewUUID(claims.UserID))
err = h.setter.UpdateAPIKey(ctx, id, &req, valuer.MustNewUUID(claims.UserID))
if err != nil {
render.Error(w, err)
return
@@ -418,7 +418,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
}
//get the API Key
existingAPIKey, err := h.module.GetAPIKey(ctx, valuer.MustNewUUID(claims.OrgID), id)
existingAPIKey, err := h.setter.GetAPIKey(ctx, valuer.MustNewUUID(claims.OrgID), id)
if err != nil {
render.Error(w, err)
return
@@ -436,7 +436,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
if err := h.module.RevokeAPIKey(ctx, id, valuer.MustNewUUID(claims.UserID)); err != nil {
if err := h.setter.RevokeAPIKey(ctx, id, valuer.MustNewUUID(claims.UserID)); err != nil {
render.Error(w, err)
return
}

View File

@@ -17,7 +17,8 @@ import (
type service struct {
settings factory.ScopedProviderSettings
store types.UserStore
module user.Module
getter user.Getter
setter user.Setter
orgGetter organization.Getter
authz authz.AuthZ
config user.RootConfig
@@ -27,7 +28,8 @@ type service struct {
func NewService(
providerSettings factory.ProviderSettings,
store types.UserStore,
module user.Module,
getter user.Getter,
setter user.Setter,
orgGetter organization.Getter,
authz authz.AuthZ,
config user.RootConfig,
@@ -35,7 +37,8 @@ func NewService(
return &service{
settings: factory.NewScopedProviderSettings(providerSettings, "go.signoz.io/pkg/modules/user"),
store: store,
module: module,
getter: getter,
setter: setter,
orgGetter: orgGetter,
authz: authz,
config: config,
@@ -60,7 +63,7 @@ func (s *service) Start(ctx context.Context) error {
return nil
}
s.settings.Logger().WarnContext(ctx, "root user reconciliation failed, retrying", errors.Attr(err))
s.settings.Logger().WarnContext(ctx, "root user reconciliation failed, retrying", "error", err)
select {
case <-s.stopC:
@@ -85,12 +88,12 @@ func (s *service) reconcile(ctx context.Context) error {
if s.config.Org.ID.IsZero() {
newOrg := types.NewOrganization(s.config.Org.Name, s.config.Org.Name)
_, err := s.module.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
_, err := s.setter.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
return err
}
newOrg := types.NewOrganizationWithID(s.config.Org.ID, s.config.Org.Name, s.config.Org.Name)
_, err = s.module.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
_, err = s.setter.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
return err
}
@@ -103,44 +106,72 @@ func (s *service) reconcile(ctx context.Context) error {
}
func (s *service) reconcileRootUser(ctx context.Context, orgID valuer.UUID) error {
existingRoot, err := s.store.GetRootUserByOrgID(ctx, orgID)
existingStorableRoot, err := s.store.GetRootUserByOrgID(ctx, orgID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existingRoot == nil {
if existingStorableRoot == nil {
return s.createOrPromoteRootUser(ctx, orgID)
}
return s.updateExistingRootUser(ctx, orgID, existingRoot)
return s.updateExistingRootUser(ctx, orgID, existingStorableRoot)
}
func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID) error {
existingUser, err := s.module.GetNonDeletedUserByEmailAndOrgID(ctx, s.config.Email, orgID)
existingUser, err := s.getter.GetNonDeletedUserByEmailAndOrgID(ctx, s.config.Email, orgID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existingUser != nil {
oldRole := existingUser.Role
existingUser.PromoteToRoot()
if err := s.module.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
userRoles, err := s.getter.GetUserRoles(ctx, existingUser.ID)
if err != nil {
return err
}
if oldRole != types.RoleAdmin {
if err := s.authz.ModifyGrant(ctx,
orgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(oldRole)},
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin)},
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), orgID, nil),
existingUserRoleNames := make([]string, len(userRoles))
for idx, userRole := range userRoles {
existingUserRoleNames[idx] = userRole.Role.Name
}
// idempotent — safe to retry; this cannot be placed inside a txn
if err := s.authz.ModifyGrant(ctx,
orgID,
existingUserRoleNames,
[]string{authtypes.SigNozAdminRoleName},
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), orgID, nil),
); err != nil {
return err
}
existingUser.PromoteToRoot()
err = s.store.RunInTx(ctx, func(ctx context.Context) error {
// update users table
deprecatedUser := types.NewDeprecatedUserFromUserAndRole(existingUser, types.RoleAdmin)
if err := s.setter.UpdateAnyUser(ctx, orgID, deprecatedUser); err != nil {
return err
}
// update user_role entries
if err := s.setter.ReplaceUserRoleEntries(
ctx,
existingUser.OrgID,
existingUser.ID,
[]string{authtypes.SigNozAdminRoleName},
); err != nil {
return err
}
// set password
return s.setPassword(ctx, existingUser.ID)
})
if err != nil {
return err
}
return s.setPassword(ctx, existingUser.ID)
return nil
}
// Create new root user
@@ -154,7 +185,7 @@ func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID
return err
}
return s.module.CreateUser(ctx, newUser, user.WithFactorPassword(factorPassword))
return s.setter.CreateUser(ctx, newUser, user.WithFactorPassword(factorPassword), user.WithRoleNames([]string{authtypes.SigNozAdminRoleName}))
}
func (s *service) updateExistingRootUser(ctx context.Context, orgID valuer.UUID, existingRoot *types.User) error {
@@ -162,7 +193,8 @@ func (s *service) updateExistingRootUser(ctx context.Context, orgID valuer.UUID,
if existingRoot.Email != s.config.Email {
existingRoot.UpdateEmail(s.config.Email)
if err := s.module.UpdateAnyUser(ctx, orgID, existingRoot); err != nil {
deprecatedUser := types.NewDeprecatedUserFromUserAndRole(existingRoot, types.RoleAdmin)
if err := s.setter.UpdateAnyUser(ctx, orgID, deprecatedUser); err != nil {
return err
}
}

View File

@@ -2,13 +2,10 @@ package impluser
import (
"context"
"log/slog"
"slices"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/emailing"
@@ -23,37 +20,42 @@ import (
"github.com/SigNoz/signoz/pkg/types/emailtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/dustin/go-humanize"
)
type Module struct {
store types.UserStore
tokenizer tokenizer.Tokenizer
emailing emailing.Emailing
settings factory.ScopedProviderSettings
orgSetter organization.Setter
authz authz.AuthZ
analytics analytics.Analytics
config user.Config
type setter struct {
store types.UserStore
userRoleStore authtypes.UserRoleStore
tokenizer tokenizer.Tokenizer
emailing emailing.Emailing
settings factory.ScopedProviderSettings
orgSetter organization.Setter
authz authz.AuthZ
analytics analytics.Analytics
config user.Config
getter root.Getter
}
// This module is a WIP, don't take inspiration from this.
func NewModule(store types.UserStore, tokenizer tokenizer.Tokenizer, emailing emailing.Emailing, providerSettings factory.ProviderSettings, orgSetter organization.Setter, authz authz.AuthZ, analytics analytics.Analytics, config user.Config) root.Module {
func NewSetter(store types.UserStore, tokenizer tokenizer.Tokenizer, emailing emailing.Emailing, providerSettings factory.ProviderSettings, orgSetter organization.Setter, authz authz.AuthZ, analytics analytics.Analytics, config user.Config, userRoleStore authtypes.UserRoleStore, getter root.Getter) root.Setter {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/modules/user/impluser")
return &Module{
store: store,
tokenizer: tokenizer,
emailing: emailing,
settings: settings,
orgSetter: orgSetter,
analytics: analytics,
authz: authz,
config: config,
return &setter{
store: store,
userRoleStore: userRoleStore,
tokenizer: tokenizer,
emailing: emailing,
settings: settings,
orgSetter: orgSetter,
analytics: analytics,
authz: authz,
config: config,
getter: getter,
}
}
// CreateBulk implements invite.Module.
func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID valuer.UUID, bulkInvites *types.PostableBulkInviteRequest) ([]*types.Invite, error) {
creator, err := m.store.GetUser(ctx, userID)
func (module *setter) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID valuer.UUID, bulkInvites *types.PostableBulkInviteRequest) ([]*types.Invite, error) {
creator, err := module.store.GetUser(ctx, userID)
if err != nil {
return nil, err
}
@@ -63,7 +65,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
for idx, invite := range bulkInvites.Invites {
emails[idx] = invite.Email.StringValue()
}
users, err := m.store.GetUsersByEmailsOrgIDAndStatuses(ctx, orgID, emails, []string{types.UserStatusActive.StringValue(), types.UserStatusPendingInvite.StringValue()})
users, err := module.store.GetUsersByEmailsOrgIDAndStatuses(ctx, orgID, emails, []string{types.UserStatusActive.StringValue(), types.UserStatusPendingInvite.StringValue()})
if err != nil {
return nil, err
}
@@ -83,39 +85,36 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
type userWithResetToken struct {
User *types.User
ResetPasswordToken *types.ResetPasswordToken
Role types.Role
}
newUsersWithResetToken := make([]*userWithResetToken, len(bulkInvites.Invites))
if err := m.store.RunInTx(ctx, func(ctx context.Context) error {
if err := module.store.RunInTx(ctx, func(ctx context.Context) error {
for idx, invite := range bulkInvites.Invites {
role, err := types.NewRole(invite.Role.String())
if err != nil {
return err
}
// create a new user with pending invite status
newUser, err := types.NewUser(invite.Name, invite.Email, role, orgID, types.UserStatusPendingInvite)
newUser, err := types.NewUser(invite.Name, invite.Email, orgID, types.UserStatusPendingInvite)
if err != nil {
return err
}
// store the user and password in db
err = m.createUserWithoutGrant(ctx, newUser)
err = module.createUserWithoutGrant(ctx, newUser, root.WithRoleNames([]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(invite.Role)}))
if err != nil {
return err
}
// generate reset password token
resetPasswordToken, err := m.GetOrCreateResetPasswordToken(ctx, newUser.ID)
resetPasswordToken, err := module.GetOrCreateResetPasswordToken(ctx, newUser.ID)
if err != nil {
m.settings.Logger().ErrorContext(ctx, "failed to create reset password token for invited user", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token for invited user", "error", err)
return err
}
newUsersWithResetToken[idx] = &userWithResetToken{
User: newUser,
ResetPasswordToken: resetPasswordToken,
Role: invite.Role,
}
}
return nil
@@ -127,9 +126,9 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
// send password reset emails to all the invited users
for idx, userWithToken := range newUsersWithResetToken {
m.analytics.TrackUser(ctx, orgID.String(), creator.ID.String(), "Invite Sent", map[string]any{
module.analytics.TrackUser(ctx, orgID.String(), creator.ID.String(), "Invite Sent", map[string]any{
"invitee_email": userWithToken.User.Email,
"invitee_role": userWithToken.User.Role,
"invitee_role": userWithToken.Role,
})
invite := &types.Invite{
@@ -139,7 +138,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
Name: userWithToken.User.DisplayName,
Email: userWithToken.User.Email,
Token: userWithToken.ResetPasswordToken.Token,
Role: userWithToken.User.Role,
Role: userWithToken.Role,
OrgID: userWithToken.User.OrgID,
TimeAuditable: types.TimeAuditable{
CreatedAt: userWithToken.User.CreatedAt,
@@ -151,38 +150,45 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
frontendBaseUrl := bulkInvites.Invites[idx].FrontendBaseUrl
if frontendBaseUrl == "" {
m.settings.Logger().InfoContext(ctx, "frontend base url is not provided, skipping email", slog.Any("invitee_email", userWithToken.User.Email))
module.settings.Logger().InfoContext(ctx, "frontend base url is not provided, skipping email", "invitee_email", userWithToken.User.Email)
continue
}
resetLink := userWithToken.ResetPasswordToken.FactorPasswordResetLink(frontendBaseUrl)
tokenLifetime := m.config.Password.Invite.MaxTokenLifetime
tokenLifetime := module.config.Password.Invite.MaxTokenLifetime
humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))
if err := m.emailing.SendHTML(ctx, userWithToken.User.Email.String(), "You're Invited to Join SigNoz", emailtypes.TemplateNameInvitationEmail, map[string]any{
if err := module.emailing.SendHTML(ctx, userWithToken.User.Email.String(), "You're Invited to Join SigNoz", emailtypes.TemplateNameInvitationEmail, map[string]any{
"inviter_email": creator.Email,
"link": resetLink,
"Expiry": humanizedTokenLifetime,
}); err != nil {
m.settings.Logger().ErrorContext(ctx, "failed to send invite email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send invite email", "error", err)
}
}
return invites, nil
}
func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
func (module *setter) CreateUser(ctx context.Context, user *types.User, opts ...root.CreateUserOption) error {
createUserOpts := root.NewCreateUserOptions(opts...)
// since assign is idempotent, multiple calls to assign won't cause issues in case of retries.
err := module.authz.Grant(ctx, input.OrgID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(input.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
if err != nil {
return err
if len(createUserOpts.RoleNames) > 0 {
err := module.authz.Grant(
ctx,
user.OrgID,
createUserOpts.RoleNames,
authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
)
if err != nil {
return err
}
}
if err := module.store.RunInTx(ctx, func(ctx context.Context) error {
if err := module.store.CreateUser(ctx, input); err != nil {
if err := module.store.CreateUser(ctx, user); err != nil {
return err
}
@@ -192,20 +198,28 @@ func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ..
}
}
// create user_role entries
if len(createUserOpts.RoleNames) > 0 {
err := module.createUserRoleEntries(ctx, user.OrgID, user.ID, createUserOpts.RoleNames)
if err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
traitsOrProperties := types.NewTraitsFromUser(input)
module.analytics.IdentifyUser(ctx, input.OrgID.String(), input.ID.String(), traitsOrProperties)
module.analytics.TrackUser(ctx, input.OrgID.String(), input.ID.String(), "User Created", traitsOrProperties)
traitsOrProperties := types.NewTraitsFromUser(user)
module.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traitsOrProperties)
module.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Created", traitsOrProperties)
return nil
}
func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.User, updatedBy string) (*types.User, error) {
existingUser, err := m.store.GetUser(ctx, valuer.MustNewUUID(id))
func (module *setter) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.DeprecatedUser, updatedBy string) (*types.DeprecatedUser, error) {
existingUser, err := module.getter.GetDeprecatedUserByOrgIDAndID(ctx, orgID, valuer.MustNewUUID(id))
if err != nil {
return nil, err
}
@@ -218,29 +232,24 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
return nil, errors.WithAdditionalf(err, "cannot update deleted user")
}
requestor, err := m.store.GetUser(ctx, valuer.MustNewUUID(updatedBy))
requestor, err := module.getter.GetDeprecatedUserByOrgIDAndID(ctx, orgID, valuer.MustNewUUID(updatedBy))
if err != nil {
return nil, err
}
if user.Role != "" && user.Role != existingUser.Role && requestor.Role != types.RoleAdmin {
roleChange := user.Role != "" && user.Role != existingUser.Role
if roleChange && requestor.Role != types.RoleAdmin {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "only admins can change roles")
}
// Make sure that the request is not demoting the last admin user.
if user.Role != "" && user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin {
adminUsers, err := m.store.GetActiveUsersByRoleAndOrgID(ctx, types.RoleAdmin, orgID)
if err != nil {
return nil, err
}
if len(adminUsers) == 1 {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot demote the last admin")
}
// make sure the user is not demoting self from admin
if roleChange && existingUser.ID == requestor.ID && existingUser.Role == types.RoleAdmin && user.Role != types.RoleAdmin {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot change self role")
}
if user.Role != "" && user.Role != existingUser.Role {
err = m.authz.ModifyGrant(ctx,
if roleChange {
err = module.authz.ModifyGrant(ctx,
orgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role)},
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
@@ -252,19 +261,41 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
}
existingUser.Update(user.DisplayName, user.Role)
if err := m.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
// update the user - idempotent (this does analytics too so keeping it outside txn)
if err := module.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
return nil, err
}
err = module.store.RunInTx(ctx, func(ctx context.Context) error {
if roleChange {
// delete old role entries and create new ones
if err := module.userRoleStore.DeleteUserRoles(ctx, existingUser.ID); err != nil {
return err
}
// create new ones
if err := module.createUserRoleEntries(ctx, existingUser.OrgID, existingUser.ID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)}); err != nil {
return err
}
}
return nil
})
if err != nil {
return nil, err
}
return existingUser, nil
}
func (module *Module) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
func (module *setter) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, deprecateUser *types.DeprecatedUser) error {
user := types.NewUserFromDeprecatedUser(deprecateUser)
if err := module.store.UpdateUser(ctx, orgID, user); err != nil {
return err
}
traits := types.NewTraitsFromUser(user)
traits := types.NewTraitsFromDeprecatedUser(deprecateUser)
module.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traits)
module.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Updated", traits)
@@ -275,7 +306,7 @@ func (module *Module) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user
return nil
}
func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error {
func (module *setter) DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error {
user, err := module.store.GetUser(ctx, valuer.MustNewUUID(id))
if err != nil {
return err
@@ -293,18 +324,29 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
}
// don't allow to delete the last admin user
adminUsers, err := module.store.GetActiveUsersByRoleAndOrgID(ctx, types.RoleAdmin, orgID)
deleter, err := module.store.GetUser(ctx, valuer.MustNewUUID(deletedBy))
if err != nil {
return err
}
if len(adminUsers) == 1 && user.Role == types.RoleAdmin {
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot delete the last admin")
if deleter.ID == user.ID {
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot self delete")
}
userRoles, err := module.getter.GetUserRoles(ctx, user.ID)
if err != nil {
return err
}
roleNames := roleNamesFromUserRoles(userRoles)
// since revoke is idempotent, multiple calls to revoke won't cause issues in case of retries
err = module.authz.Revoke(ctx, orgID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
err = module.authz.Revoke(
ctx,
orgID,
roleNames,
authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil),
)
if err != nil {
return err
}
@@ -321,7 +363,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return nil
}
func (module *Module) GetOrCreateResetPasswordToken(ctx context.Context, userID valuer.UUID) (*types.ResetPasswordToken, error) {
func (module *setter) GetOrCreateResetPasswordToken(ctx context.Context, userID valuer.UUID) (*types.ResetPasswordToken, error) {
user, err := module.store.GetUser(ctx, userID)
if err != nil {
return nil, err
@@ -388,12 +430,12 @@ func (module *Module) GetOrCreateResetPasswordToken(ctx context.Context, userID
return resetPasswordToken, nil
}
func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, email valuer.Email, frontendBaseURL string) error {
func (module *setter) ForgotPassword(ctx context.Context, orgID valuer.UUID, email valuer.Email, frontendBaseURL string) error {
if !module.config.Password.Reset.AllowSelf {
return errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "Users are not allowed to reset their password themselves, please contact an admin to reset your password.")
}
user, err := module.GetNonDeletedUserByEmailAndOrgID(ctx, email, orgID)
user, err := module.getter.GetNonDeletedUserByEmailAndOrgID(ctx, email, orgID)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return nil // for security reasons
@@ -407,7 +449,7 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
token, err := module.GetOrCreateResetPasswordToken(ctx, user.ID)
if err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token", "error", err)
return err
}
@@ -429,14 +471,14 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
"Expiry": humanizedTokenLifetime,
},
); err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to send reset password email", errors.Attr(err))
module.settings.Logger().ErrorContext(ctx, "failed to send reset password email", "error", err)
return nil
}
return nil
}
func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, token string, passwd string) error {
func (module *setter) UpdatePasswordByResetPasswordToken(ctx context.Context, token string, passwd string) error {
resetPasswordToken, err := module.store.GetResetPasswordToken(ctx, token)
if err != nil {
return err
@@ -469,12 +511,19 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
return err
}
userRoles, err := module.getter.GetUserRoles(ctx, user.ID)
if err != nil {
return err
}
roleNames := roleNamesFromUserRoles(userRoles)
// since grant is idempotent, multiple calls won't cause issues in case of retries
if user.Status == types.UserStatusPendingInvite {
if err = module.authz.Grant(
ctx,
user.OrgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
roleNames,
authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
); err != nil {
return err
@@ -503,7 +552,7 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
})
}
func (module *Module) UpdatePassword(ctx context.Context, userID valuer.UUID, oldpasswd string, passwd string) error {
func (module *setter) UpdatePassword(ctx context.Context, userID valuer.UUID, oldpasswd string, passwd string) error {
user, err := module.store.GetUser(ctx, userID)
if err != nil {
return err
@@ -547,8 +596,10 @@ func (module *Module) UpdatePassword(ctx context.Context, userID valuer.UUID, ol
return module.tokenizer.DeleteTokensByUserID(ctx, userID)
}
func (module *Module) GetOrCreateUser(ctx context.Context, user *types.User, opts ...root.CreateUserOption) (*types.User, error) {
existingUser, err := module.GetNonDeletedUserByEmailAndOrgID(ctx, user.Email, user.OrgID)
func (module *setter) GetOrCreateUser(ctx context.Context, user *types.User, opts ...root.CreateUserOption) (*types.User, error) {
createUserOpts := root.NewCreateUserOptions(opts...)
existingUser, err := module.getter.GetNonDeletedUserByEmailAndOrgID(ctx, user.Email, user.OrgID)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
return nil, err
@@ -556,48 +607,89 @@ func (module *Module) GetOrCreateUser(ctx context.Context, user *types.User, opt
}
if existingUser != nil {
// for users logging through SSO flow but are having status as pending_invite
if existingUser.Status == types.UserStatusPendingInvite {
// respect the role coming from the SSO
existingUser.Update("", user.Role)
// activate the user
if err = module.activatePendingUser(ctx, existingUser); err != nil {
if createUserOpts.RoleNames != nil {
if err = module.activatePendingUser(ctx, existingUser, root.WithRoleNames(createUserOpts.RoleNames)); err != nil {
return nil, err
}
} else {
userRoles, err := module.getter.GetUserRoles(ctx, existingUser.ID)
if err != nil {
return nil, err
}
existingRoleNames := roleNamesFromUserRoles(userRoles)
if err = module.activatePendingUser(ctx, existingUser, root.WithRoleNames(existingRoleNames)); err != nil {
return nil, err
}
}
return existingUser, nil
}
if createUserOpts.RoleNames != nil {
userRoles, err := module.getter.GetUserRoles(ctx, existingUser.ID)
if err != nil {
return nil, err
}
existingRoleNames := roleNamesFromUserRoles(userRoles)
if !sameRoleNames(existingRoleNames, createUserOpts.RoleNames) {
if err := module.authz.ModifyGrant(
ctx,
existingUser.OrgID,
existingRoleNames,
createUserOpts.RoleNames,
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), existingUser.OrgID, nil),
); err != nil {
return nil, err
}
if err := module.ReplaceUserRoleEntries(ctx, existingUser.OrgID, existingUser.ID, createUserOpts.RoleNames); err != nil {
return nil, err
}
if err := module.tokenizer.DeleteIdentity(ctx, existingUser.ID); err != nil {
return nil, err
}
}
}
return existingUser, nil
}
err = module.CreateUser(ctx, user, opts...)
if err != nil {
if createUserOpts.RoleNames == nil {
opts = append(opts, root.WithRoleNames([]string{authtypes.SigNozViewerRoleName}))
}
if err := module.CreateUser(ctx, user, opts...); err != nil {
return nil, err
}
return user, nil
}
func (m *Module) CreateAPIKey(ctx context.Context, apiKey *types.StorableAPIKey) error {
return m.store.CreateAPIKey(ctx, apiKey)
// CreateAPIKey persists the given API key via the user store.
func (module *setter) CreateAPIKey(ctx context.Context, apiKey *types.StorableAPIKey) error {
	return module.store.CreateAPIKey(ctx, apiKey)
}
func (m *Module) UpdateAPIKey(ctx context.Context, id valuer.UUID, apiKey *types.StorableAPIKey, updaterID valuer.UUID) error {
return m.store.UpdateAPIKey(ctx, id, apiKey, updaterID)
// UpdateAPIKey updates the stored API key identified by id, recording
// updaterID as the user who performed the change.
func (module *setter) UpdateAPIKey(ctx context.Context, id valuer.UUID, apiKey *types.StorableAPIKey, updaterID valuer.UUID) error {
	return module.store.UpdateAPIKey(ctx, id, apiKey, updaterID)
}
func (m *Module) ListAPIKeys(ctx context.Context, orgID valuer.UUID) ([]*types.StorableAPIKeyUser, error) {
return m.store.ListAPIKeys(ctx, orgID)
// ListAPIKeys returns all API keys belonging to the given organization.
func (module *setter) ListAPIKeys(ctx context.Context, orgID valuer.UUID) ([]*types.StorableAPIKeyUser, error) {
	return module.store.ListAPIKeys(ctx, orgID)
}
func (m *Module) GetAPIKey(ctx context.Context, orgID, id valuer.UUID) (*types.StorableAPIKeyUser, error) {
return m.store.GetAPIKey(ctx, orgID, id)
// GetAPIKey returns the API key with the given id, scoped to orgID.
func (module *setter) GetAPIKey(ctx context.Context, orgID, id valuer.UUID) (*types.StorableAPIKeyUser, error) {
	return module.store.GetAPIKey(ctx, orgID, id)
}
func (m *Module) RevokeAPIKey(ctx context.Context, id, removedByUserID valuer.UUID) error {
return m.store.RevokeAPIKey(ctx, id, removedByUserID)
// RevokeAPIKey revokes the API key identified by id, recording
// removedByUserID as the user who revoked it.
func (module *setter) RevokeAPIKey(ctx context.Context, id, removedByUserID valuer.UUID) error {
	return module.store.RevokeAPIKey(ctx, id, removedByUserID)
}
func (module *Module) CreateFirstUser(ctx context.Context, organization *types.Organization, name string, email valuer.Email, passwd string) (*types.User, error) {
func (module *setter) CreateFirstUser(ctx context.Context, organization *types.Organization, name string, email valuer.Email, passwd string) (*types.User, error) {
user, err := types.NewRootUser(name, email, organization.ID)
if err != nil {
return nil, err
@@ -614,6 +706,8 @@ func (module *Module) CreateFirstUser(ctx context.Context, organization *types.O
return nil, err
}
roleNames := []string{authtypes.SigNozAdminRoleName}
if err = module.store.RunInTx(ctx, func(ctx context.Context) error {
err = module.orgSetter.Create(ctx, organization, func(ctx context.Context, orgID valuer.UUID) error {
err = module.authz.CreateManagedRoles(ctx, orgID, managedRoles)
@@ -627,7 +721,7 @@ func (module *Module) CreateFirstUser(ctx context.Context, organization *types.O
return err
}
err = module.createUserWithoutGrant(ctx, user, root.WithFactorPassword(password))
err = module.CreateUser(ctx, user, root.WithFactorPassword(password), root.WithRoleNames(roleNames))
if err != nil {
return err
}
@@ -640,7 +734,7 @@ func (module *Module) CreateFirstUser(ctx context.Context, organization *types.O
return user, nil
}
func (module *Module) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) {
func (module *setter) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) {
stats := make(map[string]any)
counts, err := module.store.CountByOrgIDAndStatuses(ctx, orgID, []string{types.UserStatusActive.StringValue(), types.UserStatusDeleted.StringValue(), types.UserStatusPendingInvite.StringValue()})
if err == nil {
@@ -658,32 +752,10 @@ func (module *Module) Collect(ctx context.Context, orgID valuer.UUID) (map[strin
return stats, nil
}
// this function ensures that at most one non-deleted user exists for a given email in an org; if more are found, it returns an error
func (module *Module) GetNonDeletedUserByEmailAndOrgID(ctx context.Context, email valuer.Email, orgID valuer.UUID) (*types.User, error) {
existingUsers, err := module.store.GetUsersByEmailAndOrgID(ctx, email, orgID)
if err != nil {
return nil, err
}
// filter out the deleted users
existingUsers = slices.DeleteFunc(existingUsers, func(user *types.User) bool { return user.ErrIfDeleted() != nil })
if len(existingUsers) > 1 {
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "Multiple non-deleted users found for email %s in org_id: %s", email.StringValue(), orgID.StringValue())
}
if len(existingUsers) == 1 {
return existingUsers[0], nil
}
return nil, errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "No non-deleted user found with email %s in org_id: %s", email.StringValue(), orgID.StringValue())
}
func (module *Module) createUserWithoutGrant(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
func (module *setter) createUserWithoutGrant(ctx context.Context, user *types.User, opts ...root.CreateUserOption) error {
createUserOpts := root.NewCreateUserOptions(opts...)
if err := module.store.RunInTx(ctx, func(ctx context.Context) error {
if err := module.store.CreateUser(ctx, input); err != nil {
if err := module.store.CreateUser(ctx, user); err != nil {
return err
}
@@ -693,36 +765,110 @@ func (module *Module) createUserWithoutGrant(ctx context.Context, input *types.U
}
}
// create user_role entries
if len(createUserOpts.RoleNames) > 0 {
err := module.createUserRoleEntries(ctx, user.OrgID, user.ID, createUserOpts.RoleNames)
if err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
traitsOrProperties := types.NewTraitsFromUser(input)
module.analytics.IdentifyUser(ctx, input.OrgID.String(), input.ID.String(), traitsOrProperties)
module.analytics.TrackUser(ctx, input.OrgID.String(), input.ID.String(), "User Created", traitsOrProperties)
traitsOrProperties := types.NewTraitsFromUser(user)
module.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traitsOrProperties)
module.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Created", traitsOrProperties)
return nil
}
func (module *Module) activatePendingUser(ctx context.Context, user *types.User) error {
err := module.authz.Grant(
ctx,
user.OrgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
)
// createUserRoleEntries resolves roleNames to role rows scoped to the org and
// persists one user_role assignment per resolved role for the given user.
func (module *setter) createUserRoleEntries(ctx context.Context, orgID, userId valuer.UUID, roleNames []string) error {
	roles, err := module.authz.ListByOrgIDAndNames(ctx, orgID, roleNames)
	if err != nil {
		return err
	}
	return module.userRoleStore.CreateUserRoles(ctx, authtypes.NewStorableUserRoles(userId, roles))
}
// activatePendingUser transitions an invited (pending) user to active.
// When role names are supplied via options (e.g. from an SSO assertion), the
// roles are first granted in the authz system, and the user's persisted
// user_role rows are replaced with the new set inside the same transaction
// that saves the status change.
func (module *setter) activatePendingUser(ctx context.Context, user *types.User, opts ...root.CreateUserOption) error {
	options := root.NewCreateUserOptions(opts...)
	if len(options.RoleNames) > 0 {
		subject := authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil)
		if err := module.authz.Grant(ctx, user.OrgID, options.RoleNames, subject); err != nil {
			return err
		}
	}
	if err := user.UpdateStatus(types.UserStatusActive); err != nil {
		return err
	}
	return module.store.RunInTx(ctx, func(ctx context.Context) error {
		if err := module.store.UpdateUser(ctx, user.OrgID, user); err != nil {
			return err
		}
		if len(options.RoleNames) == 0 {
			return nil
		}
		// Replace any stale user_role rows with the freshly granted set.
		if err := module.userRoleStore.DeleteUserRoles(ctx, user.ID); err != nil {
			return err
		}
		return module.createUserRoleEntries(ctx, user.OrgID, user.ID, options.RoleNames)
	})
}
// ReplaceUserRoleEntries atomically swaps a user's persisted role
// assignments: within one transaction it deletes every existing user_role
// row for the user and recreates rows for finalRoleNames.
func (module *setter) ReplaceUserRoleEntries(ctx context.Context, orgID, userID valuer.UUID, finalRoleNames []string) error {
	return module.store.RunInTx(ctx, func(txCtx context.Context) error {
		if err := module.userRoleStore.DeleteUserRoles(txCtx, userID); err != nil {
			return err
		}
		return module.createUserRoleEntries(txCtx, orgID, userID, finalRoleNames)
	})
}
// sameRoleNames reports whether a and b contain the same role names,
// ignoring order but respecting multiplicity (multiset equality).
func sameRoleNames(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	counts := make(map[string]int, len(a))
	for _, name := range a {
		counts[name]++
	}
	for _, name := range b {
		counts[name]--
		if counts[name] < 0 {
			return false
		}
	}
	// Equal lengths and no count went negative, so every count is zero.
	return true
}
// roleNamesFromUserRoles extracts the role names from a slice of user-role
// assignments, skipping entries whose Role relation is nil (not loaded).
func roleNamesFromUserRoles(userRoles []*authtypes.UserRole) []string {
	names := make([]string, 0, len(userRoles))
	for _, userRole := range userRoles {
		if userRole.Role == nil {
			continue
		}
		names = append(names, userRole.Role.Name)
	}
	return names
}

View File

@@ -52,23 +52,6 @@ func (store *store) CreateUser(ctx context.Context, user *types.User) error {
return nil
}
// GetUsersByEmail returns every user row matching the given email across all
// orgs. The result is nil (not an empty slice) when no rows match.
func (store *store) GetUsersByEmail(ctx context.Context, email valuer.Email) ([]*types.User, error) {
	var users []*types.User
	query := store.sqlstore.BunDBCtx(ctx).
		NewSelect().
		Model(&users).
		Where("email = ?", email)
	if err := query.Scan(ctx); err != nil {
		return nil, err
	}
	return users, nil
}
func (store *store) GetUser(ctx context.Context, id valuer.UUID) (*types.User, error) {
user := new(types.User)
@@ -122,25 +105,6 @@ func (store *store) GetUsersByEmailAndOrgID(ctx context.Context, email valuer.Em
return users, nil
}
// GetActiveUsersByRoleAndOrgID returns every user in the given org that has
// the given role and an active status. The result is nil when no rows match.
func (store *store) GetActiveUsersByRoleAndOrgID(ctx context.Context, role types.Role, orgID valuer.UUID) ([]*types.User, error) {
	var users []*types.User
	query := store.sqlstore.BunDBCtx(ctx).
		NewSelect().
		Model(&users).
		Where("org_id = ?", orgID).
		Where("role = ?", role).
		Where("status = ?", types.UserStatusActive.StringValue())
	if err := query.Scan(ctx); err != nil {
		return nil, err
	}
	return users, nil
}
func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
_, err := store.
sqlstore.
@@ -149,7 +113,6 @@ func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, user *typ
Model(user).
Column("display_name").
Column("email").
Column("role").
Column("is_root").
Column("updated_at").
Column("status").
@@ -162,7 +125,7 @@ func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, user *typ
return nil
}
func (store *store) ListUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.GettableUser, error) {
func (store *store) ListUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.User, error) {
users := []*types.User{}
err := store.

View File

@@ -0,0 +1,83 @@
package impluser
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
// userRoleStore is the SQL-backed implementation of authtypes.UserRoleStore.
// It persists user-to-role assignment rows through the shared sqlstore handle.
type userRoleStore struct {
	sqlstore sqlstore.SQLStore
	settings factory.ProviderSettings // provider settings; not referenced by the methods visible here — TODO confirm intended use
}

// NewUserRoleStore returns a UserRoleStore backed by the given SQL store.
func NewUserRoleStore(sqlstore sqlstore.SQLStore, settings factory.ProviderSettings) authtypes.UserRoleStore {
	return &userRoleStore{sqlstore: sqlstore, settings: settings}
}
// ListUserRolesByOrgIDAndUserIDs returns the user_role rows (with the Role
// relation eagerly loaded) for the given user IDs, restricted to users that
// belong to the given org. An empty result is returned as a non-nil empty
// slice.
func (store *userRoleStore) ListUserRolesByOrgIDAndUserIDs(ctx context.Context, orgID valuer.UUID, userIDs []valuer.UUID) ([]*authtypes.UserRole, error) {
	userRoles := make([]*authtypes.UserRole, 0)
	// Guard: bun.In over an empty slice renders an empty IN () list, which is
	// invalid SQL on most engines — return the empty result directly.
	if len(userIDs) == 0 {
		return userRoles, nil
	}
	err := store.sqlstore.
		BunDBCtx(ctx).
		NewSelect().
		Model(&userRoles).
		Join("JOIN users").
		JoinOn("users.id = user_role.user_id").
		Where("users.org_id = ?", orgID).
		Where("users.id IN (?)", bun.In(userIDs)).
		Relation("Role").
		Scan(ctx)
	if err != nil {
		return nil, err
	}
	return userRoles, nil
}
// CreateUserRoles inserts the given user_role assignment rows. Duplicate
// assignments surface as an already-exists error with
// ErrCodeUserRoleAlreadyExists. An empty slice is a no-op: bun's INSERT
// errors on an empty model slice, so we return early instead.
func (store *userRoleStore) CreateUserRoles(ctx context.Context, userRoles []*authtypes.UserRole) error {
	if len(userRoles) == 0 {
		return nil
	}
	_, err := store.sqlstore.
		BunDBCtx(ctx).
		NewInsert().
		Model(&userRoles).
		Exec(ctx)
	if err != nil {
		return store.sqlstore.WrapAlreadyExistsErrf(err, authtypes.ErrCodeUserRoleAlreadyExists, "duplicate role assignments for service account")
	}
	return nil
}
// DeleteUserRoles removes every user_role assignment row for the given user.
// Deleting a user with no assignments is not an error.
func (store *userRoleStore) DeleteUserRoles(ctx context.Context, userID valuer.UUID) error {
	if _, err := store.sqlstore.
		BunDBCtx(ctx).
		NewDelete().
		Model(new(authtypes.UserRole)).
		Where("user_id = ?", userID).
		Exec(ctx); err != nil {
		return err
	}
	return nil
}
// GetUserRolesByUserID returns the user_role rows for the given user with the
// Role relation eagerly loaded. An empty result is a non-nil empty slice.
func (store *userRoleStore) GetUserRolesByUserID(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error) {
	userRoles := make([]*authtypes.UserRole, 0)
	query := store.sqlstore.
		BunDBCtx(ctx).
		NewSelect().
		Model(&userRoles).
		Where("user_id = ?", userID).
		Relation("Role")
	if err := query.Scan(ctx); err != nil {
		return nil, err
	}
	return userRoles, nil
}

View File

@@ -7,6 +7,7 @@ import (
type createUserOptions struct {
FactorPassword *types.FactorPassword
RoleNames []string
}
type CreateUserOption func(*createUserOptions)
@@ -17,9 +18,16 @@ func WithFactorPassword(factorPassword *types.FactorPassword) CreateUserOption {
}
}
// WithRoleNames is a CreateUserOption that sets the role names to assign to
// the user being created.
func WithRoleNames(roleNames []string) CreateUserOption {
	return func(options *createUserOptions) {
		options.RoleNames = roleNames
	}
}
func NewCreateUserOptions(opts ...CreateUserOption) *createUserOptions {
o := &createUserOptions{
FactorPassword: nil,
RoleNames: nil,
}
for _, opt := range opts {

View File

@@ -6,10 +6,11 @@ import (
"github.com/SigNoz/signoz/pkg/statsreporter"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Module interface {
type Setter interface {
// Creates the organization and the first user of that organization.
CreateFirstUser(ctx context.Context, organization *types.Organization, name string, email valuer.Email, password string) (*types.User, error)
@@ -33,10 +34,10 @@ type Module interface {
// Initiate forgot password flow for a user
ForgotPassword(ctx context.Context, orgID valuer.UUID, email valuer.Email, frontendBaseURL string) error
UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.User, updatedBy string) (*types.User, error)
UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.DeprecatedUser, updatedBy string) (*types.DeprecatedUser, error)
// UpdateAnyUser updates a user and persists the changes to the database along with the analytics and identity deletion.
UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error
UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.DeprecatedUser) error
DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error
// invite
@@ -49,26 +50,24 @@ type Module interface {
RevokeAPIKey(ctx context.Context, id, removedByUserID valuer.UUID) error
GetAPIKey(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*types.StorableAPIKeyUser, error)
GetNonDeletedUserByEmailAndOrgID(ctx context.Context, email valuer.Email, orgID valuer.UUID) (*types.User, error)
// Roles
ReplaceUserRoleEntries(ctx context.Context, orgID, userID valuer.UUID, finalRoleNames []string) error
statsreporter.StatsCollector
}
type Getter interface {
// Get root user by org id.
GetRootUserByOrgID(context.Context, valuer.UUID) (*types.User, error)
GetRootUserByOrgID(context.Context, valuer.UUID) (*types.User, []*authtypes.UserRole, error)
// Get gets the users based on the given id
ListByOrgID(context.Context, valuer.UUID) ([]*types.User, error)
ListByOrgID(context.Context, valuer.UUID) ([]*types.DeprecatedUser, error)
// Get users by email.
GetUsersByEmail(context.Context, valuer.Email) ([]*types.User, error)
// Get user by orgID and id.
GetByOrgIDAndID(context.Context, valuer.UUID, valuer.UUID) (*types.User, error)
// Get deprecated user object by orgID and id.
GetDeprecatedUserByOrgIDAndID(context.Context, valuer.UUID, valuer.UUID) (*types.DeprecatedUser, error)
// Get user by id.
Get(context.Context, valuer.UUID) (*types.User, error)
Get(context.Context, valuer.UUID) (*types.DeprecatedUser, error)
// List users by email and org ids.
ListUsersByEmailAndOrgIDs(context.Context, valuer.Email, []valuer.UUID) ([]*types.User, error)
@@ -81,6 +80,12 @@ type Getter interface {
// Get factor password by user id.
GetFactorPasswordByUserID(context.Context, valuer.UUID) (*types.FactorPassword, error)
// Gets single Non-Deleted user by email and org id
GetNonDeletedUserByEmailAndOrgID(ctx context.Context, email valuer.Email, orgID valuer.UUID) (*types.User, error)
// Gets user_role with roles entries from db
GetUserRoles(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error)
}
type Handler interface {

View File

@@ -5,7 +5,6 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"runtime/debug"
"strconv"
@@ -59,10 +58,10 @@ func (handler *handler) QueryRange(rw http.ResponseWriter, req *http.Request) {
queryJSON, _ := json.Marshal(queryRangeRequest)
handler.set.Logger.ErrorContext(ctx, "panic in QueryRange",
slog.Any("error", r),
slog.Any("user", claims.UserID),
slog.String("payload", string(queryJSON)),
slog.String("stacktrace", stackTrace),
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(
@@ -159,10 +158,10 @@ func (handler *handler) QueryRawStream(rw http.ResponseWriter, req *http.Request
queryJSON, _ := json.Marshal(queryRangeRequest)
handler.set.Logger.ErrorContext(ctx, "panic in QueryRawStream",
slog.Any("error", r),
slog.Any("user", claims.UserID),
slog.String("payload", string(queryJSON)),
slog.String("stacktrace", stackTrace),
"error", r,
"user", claims.UserID,
"payload", string(queryJSON),
"stacktrace", stackTrace,
)
render.Error(rw, errors.NewInternalf(

View File

@@ -47,19 +47,19 @@ func (bc *bucketCache) GetMissRanges(
// Get query window
startMs, endMs := q.Window()
bc.logger.DebugContext(ctx, "getting miss ranges", slog.String("fingerprint", q.Fingerprint()), slog.Uint64("start", startMs), slog.Uint64("end", endMs))
bc.logger.DebugContext(ctx, "getting miss ranges", "fingerprint", q.Fingerprint(), "start", startMs, "end", endMs)
// Generate cache key
cacheKey := bc.generateCacheKey(q)
bc.logger.DebugContext(ctx, "cache key", slog.String("cache_key", cacheKey))
bc.logger.DebugContext(ctx, "cache key", "cache_key", cacheKey)
// Try to get cached data
var data qbtypes.CachedData
err := bc.cache.Get(ctx, orgID, cacheKey, &data)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
bc.logger.ErrorContext(ctx, "error getting cached data", errors.Attr(err))
bc.logger.ErrorContext(ctx, "error getting cached data", "error", err)
}
// No cached data, need to fetch entire range
missing = []*qbtypes.TimeRange{{From: startMs, To: endMs}}
@@ -71,7 +71,7 @@ func (bc *bucketCache) GetMissRanges(
// Find missing ranges with step alignment
missing = bc.findMissingRangesWithStep(data.Buckets, startMs, endMs, stepMs)
bc.logger.DebugContext(ctx, "missing ranges", slog.Any("missing", missing), slog.Uint64("step", stepMs))
bc.logger.DebugContext(ctx, "missing ranges", "missing", missing, "step", stepMs)
// If no cached data overlaps with requested range, return empty result
if len(data.Buckets) == 0 {
@@ -105,9 +105,9 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
// If the entire range is within flux interval, skip caching
if startMs >= fluxBoundary {
bc.logger.DebugContext(ctx, "entire range within flux interval, skipping cache",
slog.Uint64("start", startMs),
slog.Uint64("end", endMs),
slog.Uint64("flux_boundary", fluxBoundary))
"start", startMs,
"end", endMs,
"flux_boundary", fluxBoundary)
return
}
@@ -116,8 +116,8 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
if endMs > fluxBoundary {
cachableEndMs = fluxBoundary
bc.logger.DebugContext(ctx, "adjusting end time to exclude flux interval",
slog.Uint64("original_end", endMs),
slog.Uint64("cachable_end", cachableEndMs))
"original_end", endMs,
"cachable_end", cachableEndMs)
}
// Generate cache key
@@ -155,11 +155,11 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
// If after adjustment we have no complete intervals, don't cache
if cachableStartMs >= cachableEndMs {
bc.logger.DebugContext(ctx, "no complete intervals to cache",
slog.Uint64("original_start", startMs),
slog.Uint64("original_end", endMs),
slog.Uint64("adjusted_start", cachableStartMs),
slog.Uint64("adjusted_end", cachableEndMs),
slog.Uint64("step", stepMs))
"original_start", startMs,
"original_end", endMs,
"adjusted_start", cachableStartMs,
"adjusted_end", cachableEndMs,
"step", stepMs)
return
}
}
@@ -187,7 +187,7 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
// Marshal and store in cache
if err := bc.cache.Set(ctx, orgID, cacheKey, &updatedData, bc.cacheTTL); err != nil {
bc.logger.ErrorContext(ctx, "error setting cached data", errors.Attr(err))
bc.logger.ErrorContext(ctx, "error setting cached data", "error", err)
}
}
@@ -471,7 +471,7 @@ func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*qbt
for _, bucket := range buckets {
var tsData *qbtypes.TimeSeriesData
if err := json.Unmarshal(bucket.Value, &tsData); err != nil {
bc.logger.ErrorContext(ctx, "failed to unmarshal time series data", errors.Attr(err))
bc.logger.ErrorContext(ctx, "failed to unmarshal time series data", "error", err)
continue
}
@@ -623,7 +623,7 @@ func (bc *bucketCache) resultToBuckets(ctx context.Context, result *qbtypes.Resu
// In the future, we could split large ranges into smaller buckets
valueBytes, err := json.Marshal(result.Value)
if err != nil {
bc.logger.ErrorContext(ctx, "failed to marshal result value", errors.Attr(err))
bc.logger.ErrorContext(ctx, "failed to marshal result value", "error", err)
return nil
}

View File

@@ -3,15 +3,12 @@ package querier
import (
"context"
"fmt"
"log/slog"
"math"
"slices"
"sort"
"strings"
"github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -361,14 +358,14 @@ func (q *querier) processTimeSeriesFormula(
// Create formula evaluator
evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
if err != nil {
q.logger.ErrorContext(ctx, "failed to create formula evaluator", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to create formula evaluator", "error", err, "formula", formula.Name)
return nil
}
// Evaluate the formula
formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
if err != nil {
q.logger.ErrorContext(ctx, "failed to evaluate formula", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to evaluate formula", "error", err, "formula", formula.Name)
return nil
}
@@ -511,13 +508,13 @@ func (q *querier) processScalarFormula(
canDefaultZero := req.GetQueriesSupportingZeroDefault()
evaluator, err := qbtypes.NewFormulaEvaluator(formula.Expression, canDefaultZero)
if err != nil {
q.logger.ErrorContext(ctx, "failed to create formula evaluator", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to create formula evaluator", "error", err, "formula", formula.Name)
return nil
}
formulaSeries, err := evaluator.EvaluateFormula(timeSeriesData)
if err != nil {
q.logger.ErrorContext(ctx, "failed to evaluate formula", errors.Attr(err), slog.String("formula", formula.Name))
q.logger.ErrorContext(ctx, "failed to evaluate formula", "error", err, "formula", formula.Name)
return nil
}

View File

@@ -11,10 +11,6 @@ import (
"text/template"
"time"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/querybuilder"
@@ -22,6 +18,9 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
qbv5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
)
// unquotedDottedNamePattern matches unquoted identifiers containing dots
@@ -93,7 +92,7 @@ func newPromqlQuery(
func (q *promqlQuery) Fingerprint() string {
query, err := q.renderVars(q.query.Query, q.vars, q.tr.From, q.tr.To)
if err != nil {
q.logger.ErrorContext(context.TODO(), "failed render template variables", slog.String("query", q.query.Query))
q.logger.ErrorContext(context.TODO(), "failed render template variables", "query", q.query.Query)
return ""
}
parts := []string{
@@ -136,7 +135,7 @@ func (q *promqlQuery) removeAllVarMatchers(query string, vars map[string]qbv5.Va
// Create visitor and walk the AST
visitor := &allVarRemover{allVars: allVars}
if err := parser.Walk(visitor, expr, nil); err != nil {
q.logger.ErrorContext(context.TODO(), "unexpected error while removing __all__ variable matchers", errors.Attr(err), slog.String("query", query))
q.logger.ErrorContext(context.TODO(), "unexpected error while removing __all__ variable matchers", "error", err, "query", query)
return "", errors.WrapInternalf(err, errors.CodeInternal, "error while removing __all__ variable matchers")
}

View File

@@ -10,9 +10,6 @@ import (
"sync"
"time"
"github.com/dustin/go-humanize"
"golang.org/x/exp/maps"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/prometheus"
@@ -23,6 +20,8 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/dustin/go-humanize"
"golang.org/x/exp/maps"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
@@ -369,10 +368,10 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
var err error
metricTemporality, metricTypes, err = q.metadataStore.FetchTemporalityAndTypeMulti(ctx, req.Start, req.End, metricNames...)
if err != nil {
q.logger.WarnContext(ctx, "failed to fetch metric temporality", errors.Attr(err), slog.Any("metrics", metricNames))
q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
return nil, errors.NewInternalf(errors.CodeInternal, "failed to fetch metrics temporality")
}
q.logger.DebugContext(ctx, "fetched metric temporalities and types", slog.Any("metric_temporality", metricTemporality), slog.Any("metric_types", metricTypes))
q.logger.DebugContext(ctx, "fetched metric temporalities and types", "metric_temporality", metricTemporality, "metric_types", metricTypes)
}
for i := range spec.Aggregations {
if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Temporality == metrictypes.Unknown {
@@ -591,9 +590,9 @@ func (q *querier) run(
// Skip cache if NoCache is set, or if cache is not available
if req.NoCache || q.bucketCache == nil || query.Fingerprint() == "" {
if req.NoCache {
q.logger.DebugContext(ctx, "NoCache flag set, bypassing cache", slog.String("query", name))
q.logger.DebugContext(ctx, "NoCache flag set, bypassing cache", "query", name)
} else {
q.logger.InfoContext(ctx, "no bucket cache or fingerprint, executing query", slog.String("fingerprint", query.Fingerprint()))
q.logger.InfoContext(ctx, "no bucket cache or fingerprint, executing query", "fingerprint", query.Fingerprint())
}
result, err := query.Execute(ctx)
qbEvent.HasData = qbEvent.HasData || hasData(result)
@@ -710,8 +709,8 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
totalStats := qbtypes.ExecStats{}
q.logger.DebugContext(ctx, "executing queries for missing ranges",
slog.Int("missing_ranges_count", len(missingRanges)),
slog.Any("ranges", missingRanges))
"missing_ranges_count", len(missingRanges),
"ranges", missingRanges)
sem := make(chan struct{}, 4)
var wg sync.WaitGroup
@@ -749,7 +748,7 @@ func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query
for _, err := range errs {
if err != nil {
// If any query failed, fall back to full execution
q.logger.ErrorContext(ctx, "parallel query execution failed", errors.Attr(err))
q.logger.ErrorContext(ctx, "parallel query execution failed", "error", err)
result, err := query.Execute(ctx)
if err != nil {
return nil, err

View File

@@ -9,14 +9,13 @@ import (
"log/slog"
"golang.org/x/exp/slices"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/slices"
)
var (
@@ -136,7 +135,7 @@ func (r *Repo) insertConfig(
configVersion, err := r.GetLatestVersion(ctx, orgId, c.ElementType)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
slog.ErrorContext(ctx, "failed to fetch latest config version", errors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch latest config version", "error", err)
return err
}
@@ -156,11 +155,11 @@ func (r *Repo) insertConfig(
// Delete elements first, then version (to respect potential foreign key constraints)
_, delErr := r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigElement)).Where("version_id = ?", c.ID).Exec(ctx)
if delErr != nil {
slog.ErrorContext(ctx, "failed to delete config elements during cleanup", errors.Attr(delErr), "version_id", c.ID.String())
slog.ErrorContext(ctx, "failed to delete config elements during cleanup", "error", delErr, "version_id", c.ID.String())
}
_, delErr = r.store.BunDB().NewDelete().Model(new(opamptypes.AgentConfigVersion)).Where("id = ?", c.ID).Where("org_id = ?", orgId).Exec(ctx)
if delErr != nil {
slog.ErrorContext(ctx, "failed to delete config version during cleanup", errors.Attr(delErr), "version_id", c.ID.String())
slog.ErrorContext(ctx, "failed to delete config version during cleanup", "error", delErr, "version_id", c.ID.String())
}
}
}()
@@ -171,7 +170,7 @@ func (r *Repo) insertConfig(
Model(c).
Exec(ctx)
if dbErr != nil {
slog.ErrorContext(ctx, "error in inserting config version", errors.Attr(dbErr))
slog.ErrorContext(ctx, "error in inserting config version", "error", dbErr)
return errors.WrapInternalf(dbErr, CodeConfigVersionInsertFailed, "failed to insert config version")
}
@@ -222,7 +221,7 @@ func (r *Repo) updateDeployStatus(ctx context.Context,
Where("org_id = ?", orgId).
Exec(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to update deploy status", errors.Attr(err))
slog.ErrorContext(ctx, "failed to update deploy status", "error", err)
return model.BadRequest(fmt.Errorf("failed to update deploy status"))
}
@@ -259,7 +258,7 @@ func (r *Repo) updateDeployStatusByHash(
Where("org_id = ?", orgId).
Exec(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to update deploy status", errors.Attr(err))
slog.ErrorContext(ctx, "failed to update deploy status", "error", err)
return errors.WrapInternalf(err, CodeConfigDeployStatusUpdateFailed, "failed to update deploy status")
}

View File

@@ -9,9 +9,6 @@ import (
"sync"
"sync/atomic"
"github.com/google/uuid"
yaml "gopkg.in/yaml.v3"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
filterprocessor "github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig/filterprocessor"
@@ -20,6 +17,8 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
yaml "gopkg.in/yaml.v3"
)
var m *Manager
@@ -186,6 +185,7 @@ func (m *Manager) GetDeployStatusByHash(ctx context.Context, orgId valuer.UUID,
return m.Repo.GetDeployStatusByHash(ctx, orgId, configHash)
}
func GetLatestVersion(
ctx context.Context, orgId valuer.UUID, elementType opamptypes.ElementType,
) (*opamptypes.AgentConfigVersion, error) {
@@ -230,7 +230,7 @@ func NotifyConfigUpdate(ctx context.Context) {
func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType, version int) error {
configVersion, err := GetConfigVersion(ctx, orgId, typ, version)
if err != nil {
slog.ErrorContext(ctx, "failed to fetch config version during redeploy", errors.Attr(err))
slog.ErrorContext(ctx, "failed to fetch config version during redeploy", "error", err)
return err
}
@@ -242,7 +242,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
case opamptypes.ElementTypeSamplingRules:
var config *tsp.Config
if err := yaml.Unmarshal([]byte(configVersion.Config), &config); err != nil {
slog.DebugContext(ctx, "failed to read last conf correctly", errors.Attr(err))
slog.DebugContext(ctx, "failed to read last conf correctly", "error", err)
return model.BadRequest(fmt.Errorf("failed to read the stored config correctly"))
}
@@ -254,7 +254,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return errors.WithAdditionalf(err, "failed to deploy the config")
}
@@ -262,7 +262,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
case opamptypes.ElementTypeDropRules:
var filterConfig *filterprocessor.Config
if err := yaml.Unmarshal([]byte(configVersion.Config), &filterConfig); err != nil {
slog.ErrorContext(ctx, "failed to read last conf correctly", errors.Attr(err))
slog.ErrorContext(ctx, "failed to read last conf correctly", "error", err)
return model.InternalError(fmt.Errorf("failed to read the stored config correctly"))
}
processorConf := map[string]interface{}{
@@ -272,7 +272,7 @@ func Redeploy(ctx context.Context, orgId valuer.UUID, typ opamptypes.ElementType
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return err
}
@@ -298,13 +298,13 @@ func UpsertFilterProcessor(ctx context.Context, orgId valuer.UUID, version int,
opamp.AddToMetricsPipelineSpec("filter")
configHash, err := opamp.UpsertControlProcessors(ctx, "metrics", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", errors.Attr(yamlErr))
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", "error", yamlErr)
}
m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeDropRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, string(processorConfYaml))
@@ -349,13 +349,13 @@ func UpsertSamplingProcessor(ctx context.Context, orgId valuer.UUID, version int
opamp.AddToTracePipelineSpec("signoz_tail_sampling")
configHash, err := opamp.UpsertControlProcessors(ctx, "traces", processorConf, m.OnConfigUpdate)
if err != nil {
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", errors.Attr(err))
slog.ErrorContext(ctx, "failed to call agent config update for trace processor", "error", err)
return err
}
processorConfYaml, yamlErr := yaml.Marshal(config)
if yamlErr != nil {
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", errors.Attr(yamlErr))
slog.WarnContext(ctx, "unexpected error while transforming processor config to yaml", "error", yamlErr)
}
m.updateDeployStatus(ctx, orgId, opamptypes.ElementTypeSamplingRules, version, opamptypes.DeployInitiated.StringValue(), "Deployment started", configHash, string(processorConfYaml))

View File

@@ -9,7 +9,6 @@ import (
"strings"
"github.com/SigNoz/signoz-otel-collector/utils/fingerprint"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)
@@ -79,7 +78,7 @@ func (r *ClickHouseReader) GetQBFilterSuggestionsForLogs(
)
if err != nil {
// Do not fail the entire request if only example query generation fails
r.logger.ErrorContext(ctx, "could not find attribute values for creating example query", errorsV2.Attr(err))
r.logger.ErrorContext(ctx, "could not find attribute values for creating example query", "error", err)
} else {
// add example queries for as many attributes as possible.
@@ -184,7 +183,7 @@ func (r *ClickHouseReader) getValuesForLogAttributes(
rows, err := r.db.Query(ctx, query, tagKeyQueryArgs...)
if err != nil {
r.logger.ErrorContext(ctx, "couldn't query attrib values for suggestions", errorsV2.Attr(err))
r.logger.ErrorContext(ctx, "couldn't query attrib values for suggestions", "error", err)
return nil, model.InternalError(fmt.Errorf(
"couldn't query attrib values for suggestions: %w", err,
))

View File

@@ -15,8 +15,6 @@ import (
"sync"
"time"
"github.com/uptrace/bun"
"github.com/SigNoz/signoz/pkg/prometheus"
"github.com/SigNoz/signoz/pkg/query-service/model/metrics_explorer"
"github.com/SigNoz/signoz/pkg/sqlstore"
@@ -26,18 +24,17 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/uptrace/bun"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/util/stats"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"github.com/SigNoz/signoz/pkg/cache"
"log/slog"
@@ -324,7 +321,7 @@ func (r *ClickHouseReader) GetTopLevelOperations(ctx context.Context, start, end
rows, err := r.db.Query(ctx, query, clickhouse.Named("start", start), clickhouse.Named("services", services))
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -396,7 +393,7 @@ func (r *ClickHouseReader) buildResourceSubQuery(tags []model.TagQueryParam, svc
v3.AttributeKey{},
false)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", err
}
return resourceSubQuery, nil
@@ -476,7 +473,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
resourceSubQuery, err := r.buildResourceSubQuery(queryParams.Tags, svc, *queryParams.Start, *queryParams.End)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return
}
query += `
@@ -501,7 +498,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
}
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return
}
@@ -513,7 +510,7 @@ func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.G
err = r.db.QueryRow(ctx, errorQuery, args...).Scan(&numErrors)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return
}
@@ -796,7 +793,7 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo
resourceSubQuery, err := r.buildResourceSubQuery(queryParams.Tags, queryParams.ServiceName, *queryParams.Start, *queryParams.End)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
query += `
@@ -813,7 +810,7 @@ func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *mo
err = r.db.Select(ctx, &topOperationsItems, query, namedArgs...)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -850,7 +847,7 @@ func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetU
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query")
}
@@ -880,7 +877,7 @@ func (r *ClickHouseReader) GetSpansForTrace(ctx context.Context, traceID string,
if err == sql.ErrNoRows {
return []model.SpanItemV2{}, nil
}
r.logger.Error("Error in processing trace summary sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing trace summary sql query", "error", err)
return nil, model.ExecutionError(fmt.Errorf("error in processing trace summary sql query: %w", err))
}
@@ -889,7 +886,7 @@ func (r *ClickHouseReader) GetSpansForTrace(ctx context.Context, traceID string,
err = r.db.Select(ctx, &searchScanResponses, traceDetailsQuery, traceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))
r.logger.Info(traceDetailsQuery)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, model.ExecutionError(fmt.Errorf("error in processing trace data sql query: %w", err))
}
r.logger.Info("trace details query took: ", "duration", time.Since(queryStartTime), "traceID", traceID)
@@ -901,7 +898,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadataCache(ctx contex
cachedTraceData := new(model.GetWaterfallSpansForTraceWithMetadataCache)
err := r.cacheForTraceDetail.Get(ctx, orgID, strings.Join([]string{"getWaterfallSpansForTraceWithMetadata", traceID}, "-"), cachedTraceData)
if err != nil {
r.logger.Debug("error in retrieving getWaterfallSpansForTraceWithMetadata cache", errorsV2.Attr(err), "traceID", traceID)
r.logger.Debug("error in retrieving getWaterfallSpansForTraceWithMetadata cache", "error", err, "traceID", traceID)
return nil, err
}
@@ -952,7 +949,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
ref := []model.OtelSpanRef{}
err := json.Unmarshal([]byte(item.References), &ref)
if err != nil {
r.logger.Error("getWaterfallSpansForTraceWithMetadata: error unmarshalling references", errorsV2.Attr(err), "traceID", traceID)
r.logger.Error("getWaterfallSpansForTraceWithMetadata: error unmarshalling references", "error", err, "traceID", traceID)
return nil, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "getWaterfallSpansForTraceWithMetadata: error unmarshalling references %s", err.Error())
}
@@ -972,7 +969,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
var eventMap model.Event
err = json.Unmarshal([]byte(event), &eventMap)
if err != nil {
r.logger.Error("Error unmarshalling events", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling events", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "getWaterfallSpansForTraceWithMetadata: error in unmarshalling events %s", err.Error())
}
events = append(events, eventMap)
@@ -1085,7 +1082,7 @@ func (r *ClickHouseReader) GetWaterfallSpansForTraceWithMetadata(ctx context.Con
r.logger.Info("getWaterfallSpansForTraceWithMetadata: processing pre cache", "duration", time.Since(processingBeforeCache), "traceID", traceID)
cacheErr := r.cacheForTraceDetail.Set(ctx, orgID, strings.Join([]string{"getWaterfallSpansForTraceWithMetadata", traceID}, "-"), &traceCache, time.Minute*5)
if cacheErr != nil {
r.logger.Debug("failed to store cache for getWaterfallSpansForTraceWithMetadata", "traceID", traceID, errorsV2.Attr(err))
r.logger.Debug("failed to store cache for getWaterfallSpansForTraceWithMetadata", "traceID", traceID, "error", err)
}
}
@@ -1119,7 +1116,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTraceCache(ctx context.Context,
cachedTraceData := new(model.GetFlamegraphSpansForTraceCache)
err := r.cacheForTraceDetail.Get(ctx, orgID, strings.Join([]string{"getFlamegraphSpansForTrace", traceID}, "-"), cachedTraceData)
if err != nil {
r.logger.Debug("error in retrieving getFlamegraphSpansForTrace cache", errorsV2.Attr(err), "traceID", traceID)
r.logger.Debug("error in retrieving getFlamegraphSpansForTrace cache", "error", err, "traceID", traceID)
return nil, err
}
@@ -1167,7 +1164,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
ref := []model.OtelSpanRef{}
err := json.Unmarshal([]byte(item.References), &ref)
if err != nil {
r.logger.Error("Error unmarshalling references", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling references", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "getFlamegraphSpansForTrace: error in unmarshalling references %s", err.Error())
}
@@ -1176,7 +1173,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
var eventMap model.Event
err = json.Unmarshal([]byte(event), &eventMap)
if err != nil {
r.logger.Error("Error unmarshalling events", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling events", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "getFlamegraphSpansForTrace: error in unmarshalling events %s", err.Error())
}
events = append(events, eventMap)
@@ -1255,7 +1252,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
r.logger.Info("getFlamegraphSpansForTrace: processing pre cache", "duration", time.Since(processingBeforeCache), "traceID", traceID)
cacheErr := r.cacheForTraceDetail.Set(ctx, orgID, strings.Join([]string{"getFlamegraphSpansForTrace", traceID}, "-"), &traceCache, time.Minute*5)
if cacheErr != nil {
r.logger.Debug("failed to store cache for getFlamegraphSpansForTrace", "traceID", traceID, errorsV2.Attr(err))
r.logger.Debug("failed to store cache for getFlamegraphSpansForTrace", "traceID", traceID, "error", err)
}
}
@@ -1315,7 +1312,7 @@ func (r *ClickHouseReader) GetDependencyGraph(ctx context.Context, queryParams *
err := r.db.Select(ctx, &response, query, args...)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query %w", err)
}
@@ -1434,13 +1431,13 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Model(&ttl).
Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to ttl_status table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to ttl_status table", "error", dbErr)
return
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("error in setting cold storage", "error", err)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
@@ -1453,7 +1450,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -1462,7 +1459,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
r.logger.Info("Executing TTL request: ", "request", query)
statusItem, _ := r.checkTTLStatusItem(ctx, orgID, tableName)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting ttl", errorsV2.Attr(err))
r.logger.Error("error while setting ttl", "error", err)
_, dbErr := r.
sqlDB.
BunDB().
@@ -1473,7 +1470,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
return
@@ -1488,7 +1485,7 @@ func (r *ClickHouseReader) setTTLLogs(ctx context.Context, orgID string, params
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -1572,7 +1569,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Model(&ttl).
Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to ttl_status table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to ttl_status table", "error", dbErr)
return
}
@@ -1590,7 +1587,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("Error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("Error in setting cold storage", "error", err)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
@@ -1603,7 +1600,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -1613,7 +1610,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
r.logger.Error(" ExecutingTTL request: ", "request", req)
statusItem, _ := r.checkTTLStatusItem(ctx, orgID, tableName)
if err := r.db.Exec(ctx, req); err != nil {
r.logger.Error("Error in executing set TTL query", errorsV2.Attr(err))
r.logger.Error("Error in executing set TTL query", "error", err)
_, dbErr := r.
sqlDB.
BunDB().
@@ -1624,7 +1621,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
return
@@ -1639,7 +1636,7 @@ func (r *ClickHouseReader) setTTLTraces(ctx context.Context, orgID string, param
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}(distributedTableName)
@@ -1665,7 +1662,7 @@ func (r *ClickHouseReader) hasCustomRetentionColumn(ctx context.Context) (bool,
r.logger.Debug("_retention_days column not found in logs table", "table", r.logsLocalTableV2)
return false, nil
}
r.logger.Error("Error checking for _retention_days column", errorsV2.Attr(err))
r.logger.Error("Error checking for _retention_days column", "error", err)
return false, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error checking columns")
}
@@ -1845,14 +1842,14 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
// Insert TTL setting record
_, dbErr := r.sqlDB.BunDB().NewInsert().Model(&customTTL).Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to custom_retention_ttl_settings table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to custom_retention_ttl_settings table", "error", dbErr)
return nil, errorsV2.Wrapf(dbErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "error inserting TTL settings")
}
if len(params.ColdStorageVolume) > 0 && coldStorageDuration > 0 {
err := r.setColdStorage(ctx, tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("error in setting cold storage", "error", err)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err.Err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting cold storage for table %s", tableName)
}
@@ -1861,7 +1858,7 @@ func (r *ClickHouseReader) SetTTLV2(ctx context.Context, orgID string, params *m
for i, query := range queries {
r.logger.Debug("Executing custom retention TTL request: ", "request", query, "step", i+1)
if err := r.db.Exec(ctx, query); err != nil {
r.logger.Error("error while setting custom retention ttl", errorsV2.Attr(err))
r.logger.Error("error while setting custom retention ttl", "error", err)
r.updateCustomRetentionTTLStatus(ctx, orgID, tableName, constants.StatusFailed)
return nil, errorsV2.Wrapf(err, errorsV2.TypeInternal, errorsV2.CodeInternal, "error setting custom retention TTL for table %s, query: %s", tableName, query)
}
@@ -1953,7 +1950,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
hasCustomRetention, err := r.hasCustomRetentionColumn(ctx)
if err != nil {
// If there's an error checking, assume V1 and proceed
r.logger.Warn("Error checking for custom retention column, assuming V1", errorsV2.Attr(err))
r.logger.Warn("Error checking for custom retention column, assuming V1", "error", err)
hasCustomRetention = false
}
@@ -1974,7 +1971,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing get custom ttl query")
}
@@ -1991,7 +1988,7 @@ func (r *ClickHouseReader) GetCustomRetentionTTL(ctx context.Context, orgID stri
var ttlConditions []model.CustomRetentionRule
if customTTL.Condition != "" {
if err := json.Unmarshal([]byte(customTTL.Condition), &ttlConditions); err != nil {
r.logger.Error("Error parsing TTL conditions", errorsV2.Attr(err))
r.logger.Error("Error parsing TTL conditions", "error", err)
ttlConditions = []model.CustomRetentionRule{}
}
}
@@ -2044,7 +2041,7 @@ func (r *ClickHouseReader) checkCustomRetentionTTLStatusItem(ctx context.Context
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return ttl, errorsV2.Newf(errorsV2.TypeInternal, errorsV2.CodeInternal, "error in processing custom_retention_ttl_status check sql query")
}
@@ -2061,7 +2058,7 @@ func (r *ClickHouseReader) updateCustomRetentionTTLStatus(ctx context.Context, o
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing custom_retention_ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing custom_retention_ttl_status update sql query", "error", dbErr)
}
}
}
@@ -2244,7 +2241,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Model(&ttl).
Exec(ctx)
if dbErr != nil {
r.logger.Error("error in inserting to ttl_status table", errorsV2.Attr(dbErr))
r.logger.Error("error in inserting to ttl_status table", "error", dbErr)
return
}
timeColumn := "timestamp_ms"
@@ -2262,7 +2259,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
}
err := r.setColdStorage(context.Background(), tableName, params.ColdStorageVolume)
if err != nil {
r.logger.Error("Error in setting cold storage", errorsV2.Attr(err))
r.logger.Error("Error in setting cold storage", "error", err)
statusItem, apiErr := r.checkTTLStatusItem(ctx, orgID, tableName)
if apiErr == nil {
_, dbErr := r.
@@ -2275,7 +2272,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -2285,7 +2282,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
r.logger.Info("Executing TTL request: ", "request", req)
statusItem, _ := r.checkTTLStatusItem(ctx, orgID, tableName)
if err := r.db.Exec(ctx, req); err != nil {
r.logger.Error("error while setting ttl.", errorsV2.Attr(err))
r.logger.Error("error while setting ttl.", "error", err)
_, dbErr := r.
sqlDB.
BunDB().
@@ -2296,7 +2293,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
return
@@ -2311,7 +2308,7 @@ func (r *ClickHouseReader) setTTLMetrics(ctx context.Context, orgID string, para
Where("id = ?", statusItem.ID.StringValue()).
Exec(ctx)
if dbErr != nil {
r.logger.Error("Error in processing ttl_status update sql query", errorsV2.Attr(dbErr))
r.logger.Error("Error in processing ttl_status update sql query", "error", dbErr)
return
}
}
@@ -2336,7 +2333,7 @@ func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID stri
Scan(ctx, &limitTransactions)
if err != nil {
r.logger.Error("Error in processing ttl_status delete sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing ttl_status delete sql query", "error", err)
}
_, err = r.
@@ -2347,7 +2344,7 @@ func (r *ClickHouseReader) deleteTtlTransactions(ctx context.Context, orgID stri
Where("transaction_id NOT IN (?)", bun.In(limitTransactions)).
Exec(ctx)
if err != nil {
r.logger.Error("Error in processing ttl_status delete sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing ttl_status delete sql query", "error", err)
}
}
@@ -2366,7 +2363,7 @@ func (r *ClickHouseReader) checkTTLStatusItem(ctx context.Context, orgID string,
Limit(1).
Scan(ctx)
if err != nil && err != sql.ErrNoRows {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return ttl, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing ttl_status check sql query")}
}
return ttl, nil
@@ -2413,7 +2410,7 @@ func (r *ClickHouseReader) setColdStorage(ctx context.Context, tableName string,
r.logger.Info("Executing Storage policy request: ", "request", policyReq)
if err := r.db.Exec(ctx, policyReq); err != nil {
r.logger.Error("error while setting storage policy", errorsV2.Attr(err))
r.logger.Error("error while setting storage policy", "error", err)
return &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while setting storage policy. Err=%v", err)}
}
}
@@ -2430,7 +2427,7 @@ func (r *ClickHouseReader) GetDisks(ctx context.Context) (*[]model.DiskItem, *mo
query := "SELECT name,type FROM system.disks"
if err := r.db.Select(ctx, &diskItems, query); err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting disks. Err=%v", err)}
}
@@ -2490,7 +2487,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
r.logger.Error("error while getting ttl", errorsV2.Attr(err))
r.logger.Error("error while getting ttl", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2508,7 +2505,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
r.logger.Error("error while getting ttl", errorsV2.Attr(err))
r.logger.Error("error while getting ttl", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2526,7 +2523,7 @@ func (r *ClickHouseReader) GetTTL(ctx context.Context, orgID string, ttlParams *
err := r.db.Select(ctx, &dbResp, query)
if err != nil {
r.logger.Error("error while getting ttl", errorsV2.Attr(err))
r.logger.Error("error while getting ttl", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error while getting ttl. Err=%v", err)}
}
if len(dbResp) == 0 {
@@ -2659,7 +2656,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
args = append(args, argsSubQuery...)
if errStatus != nil {
r.logger.Error("Error in processing tags", errorsV2.Attr(errStatus))
r.logger.Error("Error in processing tags", "error", errStatus)
return nil, errStatus
}
query = query + " GROUP BY groupID"
@@ -2690,7 +2687,7 @@ func (r *ClickHouseReader) ListErrors(ctx context.Context, queryParams *model.Li
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2725,7 +2722,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
args = append(args, argsSubQuery...)
if errStatus != nil {
r.logger.Error("Error in processing tags", errorsV2.Attr(errStatus))
r.logger.Error("Error in processing tags", "error", errStatus)
return 0, errStatus
}
@@ -2733,7 +2730,7 @@ func (r *ClickHouseReader) CountErrors(ctx context.Context, queryParams *model.C
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return 0, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2760,7 +2757,7 @@ func (r *ClickHouseReader) GetErrorFromErrorID(ctx context.Context, queryParams
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2789,7 +2786,7 @@ func (r *ClickHouseReader) GetErrorFromGroupID(ctx context.Context, queryParams
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2813,12 +2810,12 @@ func (r *ClickHouseReader) GetNextPrevErrorIDs(ctx context.Context, queryParams
}
getNextPrevErrorIDsResponse.NextErrorID, getNextPrevErrorIDsResponse.NextTimestamp, apiErr = r.getNextErrorID(ctx, queryParams)
if apiErr != nil {
r.logger.Error("Unable to get next error ID due to err: ", errorsV2.Attr(apiErr))
r.logger.Error("Unable to get next error ID due to err: ", "error", apiErr)
return nil, apiErr
}
getNextPrevErrorIDsResponse.PrevErrorID, getNextPrevErrorIDsResponse.PrevTimestamp, apiErr = r.getPrevErrorID(ctx, queryParams)
if apiErr != nil {
r.logger.Error("Unable to get prev error ID due to err: ", errorsV2.Attr(apiErr))
r.logger.Error("Unable to get prev error ID due to err: ", "error", apiErr)
return nil, apiErr
}
return &getNextPrevErrorIDsResponse, nil
@@ -2842,7 +2839,7 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
@@ -2863,7 +2860,7 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getNextErrorIDReponse) == 0 {
@@ -2877,7 +2874,7 @@ func (r *ClickHouseReader) getNextErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2916,7 +2913,7 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
@@ -2937,7 +2934,7 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
if len(getPrevErrorIDReponse) == 0 {
@@ -2951,7 +2948,7 @@ func (r *ClickHouseReader) getPrevErrorID(ctx context.Context, queryParams *mode
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return "", time.Time{}, &model.ApiError{Typ: model.ErrorExec, Err: fmt.Errorf("error in processing sql query")}
}
@@ -2979,7 +2976,7 @@ func (r *ClickHouseReader) FetchTemporality(ctx context.Context, orgID valuer.UU
// Batch fetch all metadata at once
metadataMap, apiErr := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
if apiErr != nil {
r.logger.Warn("Failed to fetch updated metrics metadata", errorsV2.Attr(apiErr))
r.logger.Warn("Failed to fetch updated metrics metadata", "error", apiErr)
return nil, apiErr
}
@@ -3328,7 +3325,7 @@ func (r *ClickHouseReader) QueryDashboardVars(ctx context.Context, query string)
r.logger.Info(query)
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, err
}
@@ -3382,7 +3379,7 @@ func (r *ClickHouseReader) GetMetricAggregateAttributes(ctx context.Context, org
rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), normalized)
if err != nil {
r.logger.Error("Error while querying metric names", errorsV2.Attr(err))
r.logger.Error("Error while querying metric names", "error", err)
return nil, fmt.Errorf("error while executing metric name query: %s", err.Error())
}
defer rows.Close()
@@ -3461,7 +3458,7 @@ func (r *ClickHouseReader) GetMeterAggregateAttributes(ctx context.Context, orgI
rows, err := r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while querying meter names", errorsV2.Attr(err))
r.logger.Error("Error while querying meter names", "error", err)
return nil, fmt.Errorf("error while executing meter name query: %s", err.Error())
}
defer rows.Close()
@@ -3516,7 +3513,7 @@ func (r *ClickHouseReader) GetMetricAttributeKeys(ctx context.Context, req *v3.F
}
rows, err = r.db.Query(ctx, query, req.AggregateAttribute, common.PastDayRoundOff(), normalized, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3556,7 +3553,7 @@ func (r *ClickHouseReader) GetMeterAttributeKeys(ctx context.Context, req *v3.Fi
}
rows, err = r.db.Query(ctx, query, req.AggregateAttribute, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3605,7 +3602,7 @@ func (r *ClickHouseReader) GetMetricAttributeValues(ctx context.Context, req *v3
rows, err = r.db.Query(ctx, query, req.FilterAttributeKey, names, req.FilterAttributeKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff(), normalized)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3635,7 +3632,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
// 1. Fetch metadata from cache/db using unified function
metadataMap, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricName)
if apiError != nil {
r.logger.Error("Error in getting metric cached metadata", errorsV2.Attr(apiError))
r.logger.Error("Error in getting metric cached metadata", "error", apiError)
return nil, fmt.Errorf("error fetching metric metadata: %s", apiError.Err.Error())
}
@@ -3678,7 +3675,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
rows, err := r.db.Query(ctx, query, metricName, unixMilli, serviceName, serviceName)
if err != nil {
r.logger.Error("Error while querying histogram buckets", errorsV2.Attr(err))
r.logger.Error("Error while querying histogram buckets", "error", err)
return nil, fmt.Errorf("error while querying histogram buckets: %s", err.Error())
}
defer rows.Close()
@@ -3690,7 +3687,7 @@ func (r *ClickHouseReader) GetMetricMetadata(ctx context.Context, orgID valuer.U
}
le, err := strconv.ParseFloat(leStr, 64)
if err != nil || math.IsInf(le, 0) {
r.logger.Error("Invalid 'le' bucket value", "value", leStr, errorsV2.Attr(err))
r.logger.Error("Invalid 'le' bucket value", "value", leStr, "error", err)
continue
}
leFloat64 = append(leFloat64, le)
@@ -3903,7 +3900,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
query = fmt.Sprintf("SELECT DISTINCT(tag_key), tag_type, tag_data_type from %s.%s WHERE %s and tag_type != 'logfield' limit $2", r.logsDB, r.logsTagAttributeTableV2, where)
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -3967,7 +3964,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
}
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4182,7 +4179,7 @@ func (r *ClickHouseReader) GetLogAttributeValues(ctx context.Context, req *v3.Fi
}
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4252,7 +4249,7 @@ func readRow(vars []interface{}, columnNames []string, countOfNumberCols int) ([
var metric map[string]string
err := json.Unmarshal([]byte(*v), &metric)
if err != nil {
slog.Error("unexpected error encountered", errorsV2.Attr(err))
slog.Error("unexpected error encountered", "error", err)
}
for key, val := range metric {
groupBy = append(groupBy, val)
@@ -4448,7 +4445,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
go func() {
err := r.queryProgressTracker.ReportQueryProgress(qid, p)
if err != nil {
r.logger.Error("Couldn't report query progress", "queryId", qid, errorsV2.Attr(err))
r.logger.Error("Couldn't report query progress", "queryId", qid, "error", err)
}
}()
},
@@ -4459,7 +4456,7 @@ func (r *ClickHouseReader) GetTimeSeriesResultV3(ctx context.Context, query stri
rows, err := r.db.Query(ctx, query)
if err != nil {
r.logger.Error("error while reading time series result", errorsV2.Attr(err))
r.logger.Error("error while reading time series result", "error", err)
return nil, errors.New(err.Error())
}
defer rows.Close()
@@ -4501,7 +4498,7 @@ func (r *ClickHouseReader) GetListResultV3(ctx context.Context, query string) ([
})
rows, err := r.db.Query(ctx, query)
if err != nil {
r.logger.Error("error while reading time series result", errorsV2.Attr(err))
r.logger.Error("error while reading time series result", "error", err)
return nil, errors.New(err.Error())
}
@@ -4579,7 +4576,7 @@ func (r *ClickHouseReader) GetMetricsExistenceAndEarliestTime(ctx context.Contex
var count, minFirstReported uint64
err := r.db.QueryRow(ctx, query, clickhouse.Named("metric_names", metricNames)).Scan(&count, &minFirstReported)
if err != nil {
r.logger.Error("error getting host metrics existence and earliest time", errorsV2.Attr(err))
r.logger.Error("error getting host metrics existence and earliest time", "error", err)
return 0, 0, err
}
return count, minFirstReported, nil
@@ -4589,7 +4586,7 @@ func getPersonalisedError(err error) error {
if err == nil {
return nil
}
slog.Error("error while reading result", errorsV2.Attr(err))
slog.Error("error while reading result", "error", err)
if strings.Contains(err.Error(), "code: 307") {
return chErrors.ErrResourceBytesLimitExceeded
}
@@ -4667,7 +4664,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4733,7 +4730,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi
rows, err = r.db.Query(ctx, query, fmt.Sprintf("%%%s%%", req.SearchText), req.Limit)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4849,7 +4846,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.
}
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -4898,7 +4895,7 @@ func (r *ClickHouseReader) GetSpanAttributeKeysByNames(ctx context.Context, name
rows, err = r.db.Query(ctx, query)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, fmt.Errorf("error while executing query: %s", err.Error())
}
defer rows.Close()
@@ -5059,7 +5056,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
r.logger.Debug("rule state history query", "query", query)
err := r.db.Select(ctx, &history, query)
if err != nil {
r.logger.Error("Error while reading rule state history", errorsV2.Attr(err))
r.logger.Error("Error while reading rule state history", "error", err)
return nil, err
}
@@ -5127,7 +5124,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryTopContributorsByRuleID(
contributors := []model.RuleStateHistoryContributor{}
err := r.db.Select(ctx, &contributors, query)
if err != nil {
r.logger.Error("Error while reading rule state history", errorsV2.Attr(err))
r.logger.Error("Error while reading rule state history", "error", err)
return nil, err
}
@@ -5428,7 +5425,7 @@ func (r *ClickHouseReader) GetMinAndMaxTimestampForTraceID(ctx context.Context,
err := r.db.QueryRow(ctx, query).Scan(&minTime, &maxTime)
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return 0, 0, err
}
@@ -5474,7 +5471,7 @@ func (r *ClickHouseReader) GetAllMetricFilterAttributeKeys(ctx context.Context,
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query, common.PastDayRoundOff(), normalized, fmt.Sprintf("%%%s%%", req.SearchText)) //only showing past day data
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -5520,7 +5517,7 @@ func (r *ClickHouseReader) GetAllMetricFilterAttributeValues(ctx context.Context
rows, err = r.db.Query(valueCtx, query, req.FilterKey, req.FilterKey, fmt.Sprintf("%%%s%%", req.SearchText), common.PastDayRoundOff(), normalized) //only showing past day data
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -5554,7 +5551,7 @@ func (r *ClickHouseReader) GetAllMetricFilterUnits(ctx context.Context, req *met
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -5585,7 +5582,7 @@ func (r *ClickHouseReader) GetAllMetricFilterTypes(ctx context.Context, req *met
valueCtx := context.WithValue(ctx, "clickhouse_max_threads", constants.MetricsExplorerClickhouseThreads)
rows, err := r.db.Query(valueCtx, query, fmt.Sprintf("%%%s%%", req.SearchText))
if err != nil {
r.logger.Error("Error while executing query", errorsV2.Attr(err))
r.logger.Error("Error while executing query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -5824,7 +5821,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
queryDuration := time.Since(begin)
r.logger.Info("Time taken to execute metrics query to fetch metrics with high time series", "query", metricsQuery, "args", args, "duration", queryDuration)
if err != nil {
r.logger.Error("Error executing metrics query", errorsV2.Attr(err))
r.logger.Error("Error executing metrics query", "error", err)
return &metrics_explorer.SummaryListMetricsResponse{}, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -5835,14 +5832,14 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
for rows.Next() {
var metric metrics_explorer.MetricDetail
if err := rows.Scan(&metric.MetricName, &metric.Description, &metric.MetricType, &metric.MetricUnit, &metric.TimeSeries, &response.Total); err != nil {
r.logger.Error("Error scanning metric row", errorsV2.Attr(err))
r.logger.Error("Error scanning metric row", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
metricNames = append(metricNames, metric.MetricName)
response.Metrics = append(response.Metrics, metric)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over metric rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over metric rows", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
// If no metrics were found, return early.
@@ -5929,7 +5926,7 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
queryDuration = time.Since(begin)
r.logger.Info("Time taken to execute list summary query", "query", sampleQuery, "args", args, "duration", queryDuration)
if err != nil {
r.logger.Error("Error executing samples query", errorsV2.Attr(err))
r.logger.Error("Error executing samples query", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -5940,20 +5937,20 @@ func (r *ClickHouseReader) ListSummaryMetrics(ctx context.Context, orgID valuer.
var samples uint64
var metricName string
if err := rows.Scan(&samples, &metricName); err != nil {
r.logger.Error("Error scanning sample row", errorsV2.Attr(err))
r.logger.Error("Error scanning sample row", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
samplesMap[metricName] = samples
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over sample rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over sample rows", "error", err)
return &response, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
//get updated metrics data
batch, apiError := r.GetUpdatedMetricsMetadata(ctx, orgID, metricNames...)
if apiError != nil {
r.logger.Error("Error in getting metrics cached metadata", errorsV2.Attr(apiError))
r.logger.Error("Error in getting metrics cached metadata", "error", apiError)
}
var filteredMetrics []metrics_explorer.MetricDetail
@@ -6045,7 +6042,7 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
duration := time.Since(begin)
r.logger.Info("Time taken to execute time series percentage query", "query", query, "args", args, "duration", duration)
if err != nil {
r.logger.Error("Error executing time series percentage query", errorsV2.Attr(err), "query", query)
r.logger.Error("Error executing time series percentage query", "error", err, "query", query)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6054,14 +6051,14 @@ func (r *ClickHouseReader) GetMetricsTimeSeriesPercentage(ctx context.Context, r
for rows.Next() {
var item metrics_explorer.TreeMapResponseItem
if err := rows.Scan(&item.MetricName, &item.TotalValue, &item.Percentage); err != nil {
r.logger.Error("Error scanning row", errorsV2.Attr(err))
r.logger.Error("Error scanning row", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
treemap = append(treemap, item)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over rows", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -6112,7 +6109,7 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
duration := time.Since(begin)
r.logger.Info("Time taken to execute samples percentage metric name query to reduce search space", "query", metricsQuery, "start", start, "end", end, "duration", duration)
if err != nil {
r.logger.Error("Error executing samples percentage query", errorsV2.Attr(err))
r.logger.Error("Error executing samples percentage query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6123,13 +6120,13 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
var metricName string
var timeSeries uint64
if err := rows.Scan(&metricName, &timeSeries); err != nil {
r.logger.Error("Error scanning metric row", errorsV2.Attr(err))
r.logger.Error("Error scanning metric row", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
metricNames = append(metricNames, metricName)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over metric rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over metric rows", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -6210,7 +6207,7 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
duration = time.Since(begin)
r.logger.Info("Time taken to execute samples percentage query", "query", sampleQuery, "args", args, "duration", duration)
if err != nil {
r.logger.Error("Error executing samples query", errorsV2.Attr(err))
r.logger.Error("Error executing samples query", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
defer rows.Close()
@@ -6220,13 +6217,13 @@ func (r *ClickHouseReader) GetMetricsSamplesPercentage(ctx context.Context, req
for rows.Next() {
var item metrics_explorer.TreeMapResponseItem
if err := rows.Scan(&item.TotalValue, &item.MetricName, &item.Percentage); err != nil {
r.logger.Error("Error scanning row", errorsV2.Attr(err))
r.logger.Error("Error scanning row", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
treemap = append(treemap, item)
}
if err := rows.Err(); err != nil {
r.logger.Error("Error iterating over sample rows", errorsV2.Attr(err))
r.logger.Error("Error iterating over sample rows", "error", err)
return nil, &model.ApiError{Typ: "ClickHouseError", Err: err}
}
@@ -6830,7 +6827,7 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, 0); cacheErr != nil {
r.logger.Error("Failed to store metrics metadata in cache", "metric_name", metadata.MetricName, errorsV2.Attr(cacheErr))
r.logger.Error("Failed to store metrics metadata in cache", "metric_name", metadata.MetricName, "error", cacheErr)
}
cachedMetadata[metadata.MetricName] = metadata
found[metadata.MetricName] = struct{}{}
@@ -6871,7 +6868,7 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, orgID
cacheKey := constants.UpdatedMetricsMetadataCachePrefix + metadata.MetricName
if cacheErr := r.cache.Set(ctx, orgID, cacheKey, metadata, 0); cacheErr != nil {
r.logger.Error("Failed to cache fallback metadata", "metric_name", metadata.MetricName, errorsV2.Attr(cacheErr))
r.logger.Error("Failed to cache fallback metadata", "metric_name", metadata.MetricName, "error", cacheErr)
}
cachedMetadata[metadata.MetricName] = metadata
}
@@ -6903,7 +6900,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc
if err == sql.ErrNoRows {
return &searchSpansResult, nil
}
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query")
}
@@ -6918,7 +6915,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc
query := fmt.Sprintf("SELECT timestamp, duration_nano, span_id, trace_id, has_error, kind, resource_string_service$$name, name, links as references, attributes_string, attributes_number, attributes_bool, resources_string, events, status_message, status_code_string, kind_string FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3", r.TraceDB, r.traceTableName)
err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))
if err != nil {
r.logger.Error("Error in processing sql query", errorsV2.Attr(err))
r.logger.Error("Error in processing sql query", "error", err)
return nil, fmt.Errorf("error in processing sql query")
}
@@ -6930,7 +6927,7 @@ func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.Searc
ref := []model.OtelSpanRef{}
err := json.Unmarshal([]byte(item.References), &ref)
if err != nil {
r.logger.Error("Error unmarshalling references", errorsV2.Attr(err))
r.logger.Error("Error unmarshalling references", "error", err)
return nil, err
}

View File

@@ -25,8 +25,6 @@ import (
"text/template"
"time"
"github.com/prometheus/prometheus/promql"
"github.com/SigNoz/signoz/pkg/alertmanager"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/middleware"
@@ -37,6 +35,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
"github.com/gorilla/mux"
"github.com/gorilla/websocket"
@@ -253,13 +252,13 @@ func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, erro
// TODO(nitya): remote this in later for multitenancy.
orgs, err := opts.Signoz.Modules.OrgGetter.ListByOwnedKeyRange(context.Background())
if err != nil {
aH.logger.Warn("unexpected error while fetching orgs while initializing base api handler", errors.Attr(err))
aH.logger.Warn("unexpected error while fetching orgs while initializing base api handler", "error", err)
}
// if the first org with the first user is created then the setup is complete.
if len(orgs) == 1 {
count, err := opts.Signoz.Modules.UserGetter.CountByOrgID(context.Background(), orgs[0].ID)
if err != nil {
aH.logger.Warn("unexpected error while fetching user count while initializing base api handler", errors.Attr(err))
aH.logger.Warn("unexpected error while fetching user count while initializing base api handler", "error", err)
}
if count > 0 {
@@ -314,7 +313,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
Data: data,
})
if err != nil {
slog.Error("error marshalling json response", errors.Attr(err))
slog.Error("error marshalling json response", "error", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -346,7 +345,7 @@ func RespondError(w http.ResponseWriter, apiErr model.BaseApiError, data interfa
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
if n, err := w.Write(b); err != nil {
slog.Error("error writing response", "bytes_written", n, errors.Attr(err))
slog.Error("error writing response", "bytes_written", n, "error", err)
}
}
@@ -358,7 +357,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
Data: data,
})
if err != nil {
slog.Error("error marshalling json response", errors.Attr(err))
slog.Error("error marshalling json response", "error", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
@@ -366,7 +365,7 @@ func writeHttpResponse(w http.ResponseWriter, data interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
if n, err := w.Write(b); err != nil {
slog.Error("error writing response", "bytes_written", n, errors.Attr(err))
slog.Error("error writing response", "bytes_written", n, "error", err)
}
}
@@ -938,14 +937,14 @@ func (aH *APIHandler) metaForLinks(ctx context.Context, rule *ruletypes.Gettable
}
keys = model.GetLogFieldsV3(ctx, params, logFields)
} else {
aH.logger.ErrorContext(ctx, "failed to get log fields using empty keys", errors.Attr(apiErr))
aH.logger.ErrorContext(ctx, "failed to get log fields using empty keys", "error", apiErr)
}
} else if rule.AlertType == ruletypes.AlertTypeTraces {
traceFields, err := aH.reader.GetSpanAttributeKeysByNames(ctx, logsv3.GetFieldNames(rule.PostableRule.RuleCondition.CompositeQuery))
if err == nil {
keys = traceFields
} else {
aH.logger.ErrorContext(ctx, "failed to get span attributes using empty keys", errors.Attr(err))
aH.logger.ErrorContext(ctx, "failed to get span attributes using empty keys", "error", err)
}
}
@@ -1278,14 +1277,14 @@ func (aH *APIHandler) List(rw http.ResponseWriter, r *http.Request) {
installedIntegrationDashboards, apiErr := aH.IntegrationsController.GetDashboardsForInstalledIntegrations(ctx, orgID)
if apiErr != nil {
aH.logger.ErrorContext(ctx, "failed to get dashboards for installed integrations", errors.Attr(apiErr))
aH.logger.ErrorContext(ctx, "failed to get dashboards for installed integrations", "error", apiErr)
} else {
dashboards = append(dashboards, installedIntegrationDashboards...)
}
cloudIntegrationDashboards, apiErr := aH.CloudIntegrationsController.AvailableDashboards(ctx, orgID)
if apiErr != nil {
aH.logger.ErrorContext(ctx, "failed to get dashboards for cloud integrations", errors.Attr(apiErr))
aH.logger.ErrorContext(ctx, "failed to get dashboards for cloud integrations", "error", apiErr)
} else {
dashboards = append(dashboards, cloudIntegrationDashboards...)
}
@@ -1327,7 +1326,7 @@ func (aH *APIHandler) testRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for test rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for test rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1379,7 +1378,7 @@ func (aH *APIHandler) patchRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for patch rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for patch rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1409,7 +1408,7 @@ func (aH *APIHandler) editRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for edit rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for edit rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1434,7 +1433,7 @@ func (aH *APIHandler) createRule(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
body, err := io.ReadAll(r.Body)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for create rule", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for create rule", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
@@ -1480,7 +1479,7 @@ func (aH *APIHandler) queryRangeMetrics(w http.ResponseWriter, r *http.Request)
}
if res.Err != nil {
aH.logger.ErrorContext(r.Context(), "error in query range metrics", errors.Attr(res.Err))
aH.logger.ErrorContext(r.Context(), "error in query range metrics", "error", res.Err)
}
if res.Err != nil {
@@ -1535,7 +1534,7 @@ func (aH *APIHandler) queryMetrics(w http.ResponseWriter, r *http.Request) {
}
if res.Err != nil {
aH.logger.ErrorContext(r.Context(), "error in query range metrics", errors.Attr(res.Err))
aH.logger.ErrorContext(r.Context(), "error in query range metrics", "error", res.Err)
}
if res.Err != nil {
@@ -1638,7 +1637,7 @@ func (aH *APIHandler) getServicesTopLevelOps(w http.ResponseWriter, r *http.Requ
var params topLevelOpsParams
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
aH.logger.ErrorContext(r.Context(), "error reading request body for get top operations", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "error reading request body for get top operations", "error", err)
}
if params.Service != "" {
@@ -2042,7 +2041,7 @@ func (aH *APIHandler) registerUser(w http.ResponseWriter, r *http.Request) {
}
organization := types.NewOrganization(req.OrgDisplayName, req.OrgName)
user, errv2 := aH.Signoz.Modules.User.CreateFirstUser(r.Context(), organization, req.Name, req.Email, req.Password)
user, errv2 := aH.Signoz.Modules.UserSetter.CreateFirstUser(r.Context(), organization, req.Name, req.Email, req.Password)
if errv2 != nil {
render.Error(w, errv2)
return
@@ -2060,7 +2059,7 @@ func (aH *APIHandler) HandleError(w http.ResponseWriter, err error, statusCode i
return false
}
if statusCode == http.StatusInternalServerError {
aH.logger.Error("internal server error in http handler", errors.Attr(err))
aH.logger.Error("internal server error in http handler", "error", err)
}
structuredResp := structuredResponse{
Errors: []structuredError{
@@ -2154,7 +2153,7 @@ func (aH *APIHandler) onboardProducers(
) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2162,7 +2161,7 @@ func (aH *APIHandler) onboardProducers(
chq, err := kafka.BuildClickHouseQuery(messagingQueue, kafka.KafkaQueue, "onboard_producers")
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard producers", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard producers", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2256,7 +2255,7 @@ func (aH *APIHandler) onboardConsumers(
) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2264,7 +2263,7 @@ func (aH *APIHandler) onboardConsumers(
chq, err := kafka.BuildClickHouseQuery(messagingQueue, kafka.KafkaQueue, "onboard_consumers")
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard consumers", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build clickhouse query for onboard consumers", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2403,7 +2402,7 @@ func (aH *APIHandler) onboardKafka(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2411,7 +2410,7 @@ func (aH *APIHandler) onboardKafka(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err := kafka.BuildBuilderQueriesKafkaOnboarding(messagingQueue)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build kafka onboarding queries", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build kafka onboarding queries", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2513,19 +2512,19 @@ func (aH *APIHandler) getNetworkData(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
queryRangeParams, err := kafka.BuildQRParamsWithCache(messagingQueue, "throughput", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for throughput", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for throughput", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2564,12 +2563,12 @@ func (aH *APIHandler) getNetworkData(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err = kafka.BuildQRParamsWithCache(messagingQueue, "fetch-latency", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for fetch latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for fetch latency", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for fetch latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for fetch latency", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2624,7 +2623,7 @@ func (aH *APIHandler) getProducerData(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2638,13 +2637,13 @@ func (aH *APIHandler) getProducerData(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2681,7 +2680,7 @@ func (aH *APIHandler) getConsumerData(w http.ResponseWriter, r *http.Request) {
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2691,13 +2690,13 @@ func (aH *APIHandler) getConsumerData(w http.ResponseWriter, r *http.Request) {
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2739,7 +2738,7 @@ func (aH *APIHandler) getPartitionOverviewLatencyData(w http.ResponseWriter, r *
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2749,13 +2748,13 @@ func (aH *APIHandler) getPartitionOverviewLatencyData(w http.ResponseWriter, r *
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer-topic-throughput", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer topic throughput", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer topic throughput", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer topic throughput", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer topic throughput", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2797,7 +2796,7 @@ func (aH *APIHandler) getConsumerPartitionLatencyData(w http.ResponseWriter, r *
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2807,13 +2806,13 @@ func (aH *APIHandler) getConsumerPartitionLatencyData(w http.ResponseWriter, r *
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer_partition_latency", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer partition latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer partition latency", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer partition latency", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer partition latency", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2857,7 +2856,7 @@ func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *ht
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2868,13 +2867,13 @@ func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *ht
producerQueryRangeParams, err := kafka.BuildQRParamsWithCache(messagingQueue, "producer-throughput-overview", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(producerQueryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2910,12 +2909,12 @@ func (aH *APIHandler) getProducerThroughputOverview(w http.ResponseWriter, r *ht
queryRangeParams, err := kafka.BuildQRParamsWithCache(messagingQueue, "producer-throughput-overview-byte-rate", attributeCache)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput byte rate", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput byte rate", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput byte rate", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput byte rate", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -2973,7 +2972,7 @@ func (aH *APIHandler) getProducerThroughputDetails(w http.ResponseWriter, r *htt
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -2983,13 +2982,13 @@ func (aH *APIHandler) getProducerThroughputDetails(w http.ResponseWriter, r *htt
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer-throughput-details", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -3031,7 +3030,7 @@ func (aH *APIHandler) getConsumerThroughputOverview(w http.ResponseWriter, r *ht
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -3041,13 +3040,13 @@ func (aH *APIHandler) getConsumerThroughputOverview(w http.ResponseWriter, r *ht
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer-throughput-overview", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput overview", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput overview", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -3089,7 +3088,7 @@ func (aH *APIHandler) getConsumerThroughputDetails(w http.ResponseWriter, r *htt
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -3099,13 +3098,13 @@ func (aH *APIHandler) getConsumerThroughputDetails(w http.ResponseWriter, r *htt
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "consumer-throughput-details", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for consumer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput details", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for consumer throughput details", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -3150,7 +3149,7 @@ func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Req
messagingQueue, apiErr := ParseKafkaQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse kafka queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -3160,7 +3159,7 @@ func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Req
queryRangeParams, err := kafka.BuildQueryRangeParams(messagingQueue, "producer-consumer-eval", kafkaSpanEval)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer consumer eval", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build query range params for producer consumer eval", "error", err)
RespondError(w, &model.ApiError{
Typ: model.ErrorBadData,
Err: err,
@@ -3169,7 +3168,7 @@ func (aH *APIHandler) getProducerConsumerEval(w http.ResponseWriter, r *http.Req
}
if err := validateQueryRangeParamsV3(queryRangeParams); err != nil {
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer consumer eval", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to validate query range params for producer consumer eval", "error", err)
RespondError(w, apiErr, nil)
return
}
@@ -4455,7 +4454,7 @@ func (aH *APIHandler) QueryRangeV3Format(w http.ResponseWriter, r *http.Request)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
aH.logger.ErrorContext(r.Context(), "error parsing query range params", errors.Attr(apiErrorObj.Err))
aH.logger.ErrorContext(r.Context(), "error parsing query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -4535,7 +4534,7 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
if apiErr != nil {
aH.logger.ErrorContext(ctx, "failed to report query start for progress tracking",
"query_id", queryIdHeader, errors.Attr(apiErr),
"query_id", queryIdHeader, "error", apiErr,
)
} else {
@@ -4710,7 +4709,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", errors.Attr(apiErrorObj.Err))
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -4718,7 +4717,7 @@ func (aH *APIHandler) QueryRangeV3(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", errors.Attr(temporalityErr))
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", "error", temporalityErr)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -4763,7 +4762,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
if apiErr != nil {
// Shouldn't happen unless query progress requested after query finished
aH.logger.WarnContext(r.Context(), "failed to subscribe to query progress",
"query_id", queryId, errors.Attr(apiErr),
"query_id", queryId, "error", apiErr,
)
return
}
@@ -4773,7 +4772,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
msg, err := json.Marshal(queryProgress)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to serialize progress message",
"query_id", queryId, "progress", queryProgress, errors.Attr(err),
"query_id", queryId, "progress", queryProgress, "error", err,
)
continue
}
@@ -4781,7 +4780,7 @@ func (aH *APIHandler) GetQueryProgressUpdates(w http.ResponseWriter, r *http.Req
err = c.WriteMessage(websocket.TextMessage, msg)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to write progress message to websocket",
"query_id", queryId, "msg", string(msg), errors.Attr(err),
"query_id", queryId, "msg", string(msg), "error", err,
)
break
@@ -4929,7 +4928,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", errors.Attr(apiErrorObj.Err))
aH.logger.ErrorContext(r.Context(), "error parsing metric query range params", "error", apiErrorObj.Err)
RespondError(w, apiErrorObj, nil)
return
}
@@ -4938,7 +4937,7 @@ func (aH *APIHandler) QueryRangeV4(w http.ResponseWriter, r *http.Request) {
// add temporality for each metric
temporalityErr := aH.PopulateTemporality(r.Context(), orgID, queryRangeParams)
if temporalityErr != nil {
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", errors.Attr(temporalityErr))
aH.logger.ErrorContext(r.Context(), "error adding temporality for metrics", "error", temporalityErr)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: temporalityErr}, nil)
return
}
@@ -4987,7 +4986,7 @@ func (aH *APIHandler) getQueueOverview(w http.ResponseWriter, r *http.Request) {
queueListRequest, apiErr := ParseQueueBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse queue body", errors.Attr(apiErr.Err))
aH.logger.ErrorContext(r.Context(), "failed to parse queue body", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -4995,7 +4994,7 @@ func (aH *APIHandler) getQueueOverview(w http.ResponseWriter, r *http.Request) {
chq, err := queues2.BuildOverviewQuery(queueListRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build queue overview query", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build queue overview query", "error", err)
RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: fmt.Errorf("error building clickhouse query: %v", err)}, nil)
return
}
@@ -5026,7 +5025,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
// Parse the request body to get third-party query parameters
thirdPartyQueryRequest, apiErr := ParseRequestBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse request body", errors.Attr(apiErr))
aH.logger.ErrorContext(r.Context(), "failed to parse request body", "error", apiErr)
render.Error(w, errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, apiErr.Error()))
return
}
@@ -5034,7 +5033,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
// Build the v5 query range request for domain listing
queryRangeRequest, err := thirdpartyapi.BuildDomainList(thirdPartyQueryRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build domain list query", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build domain list query", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return
@@ -5047,7 +5046,7 @@ func (aH *APIHandler) getDomainList(w http.ResponseWriter, r *http.Request) {
// Execute the query using the v5 querier
result, err := aH.Signoz.Querier.QueryRange(ctx, orgID, queryRangeRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "query execution failed", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "query execution failed", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return
@@ -5086,7 +5085,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
// Parse the request body to get third-party query parameters
thirdPartyQueryRequest, apiErr := ParseRequestBody(r)
if apiErr != nil {
aH.logger.ErrorContext(r.Context(), "failed to parse request body", errors.Attr(apiErr))
aH.logger.ErrorContext(r.Context(), "failed to parse request body", "error", apiErr)
render.Error(w, errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, apiErr.Error()))
return
}
@@ -5094,7 +5093,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
// Build the v5 query range request for domain info
queryRangeRequest, err := thirdpartyapi.BuildDomainInfo(thirdPartyQueryRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "failed to build domain info query", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "failed to build domain info query", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return
@@ -5107,7 +5106,7 @@ func (aH *APIHandler) getDomainInfo(w http.ResponseWriter, r *http.Request) {
// Execute the query using the v5 querier
result, err := aH.Signoz.Querier.QueryRange(ctx, orgID, queryRangeRequest)
if err != nil {
aH.logger.ErrorContext(r.Context(), "query execution failed", errors.Attr(err))
aH.logger.ErrorContext(r.Context(), "query execution failed", "error", err)
apiErrObj := errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, err.Error())
render.Error(w, apiErrObj)
return

View File

@@ -7,8 +7,6 @@ import (
"slices"
"strings"
"github.com/google/uuid"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -21,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/google/uuid"
"log/slog"
)
@@ -176,7 +175,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
if version >= 0 {
savedPipelines, err := ic.getPipelinesByVersion(ctx, orgID.String(), version)
if err != nil {
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, errors.Attr(err))
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, "error", err)
return nil, err
}
result = savedPipelines
@@ -228,7 +227,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
) (*PipelinesResponse, error) {
pipelines, err := ic.getEffectivePipelinesByVersion(ctx, orgId, version)
if err != nil {
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, errors.Attr(err))
slog.ErrorContext(ctx, "failed to get pipelines for version", "version", version, "error", err)
return nil, err
}
@@ -236,7 +235,7 @@ func (ic *LogParsingPipelineController) GetPipelinesByVersion(
if version >= 0 {
cv, err := agentConf.GetConfigVersion(ctx, orgId, opamptypes.ElementTypeLogPipelines, version)
if err != nil {
slog.ErrorContext(ctx, "failed to get config for version", "version", version, errors.Attr(err))
slog.ErrorContext(ctx, "failed to get config for version", "version", version, "error", err)
return nil, err
}
configVersion = cv

View File

@@ -81,7 +81,7 @@ func (r *Repo) insertPipeline(
Model(&insertRow.StoreablePipeline).
Exec(ctx)
if err != nil {
slog.ErrorContext(ctx, "error in inserting pipeline data", errors.Attr(err))
slog.ErrorContext(ctx, "error in inserting pipeline data", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to insert pipeline")
}
@@ -137,7 +137,7 @@ func (r *Repo) GetPipeline(
Where("org_id = ?", orgID).
Scan(ctx)
if err != nil {
slog.ErrorContext(ctx, "failed to get ingestion pipeline from db", errors.Attr(err))
slog.ErrorContext(ctx, "failed to get ingestion pipeline from db", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to get ingestion pipeline from db")
}
@@ -150,11 +150,11 @@ func (r *Repo) GetPipeline(
gettablePipeline := pipelinetypes.GettablePipeline{}
gettablePipeline.StoreablePipeline = storablePipelines[0]
if err := gettablePipeline.ParseRawConfig(); err != nil {
slog.ErrorContext(ctx, "invalid pipeline config found", "id", id, errors.Attr(err))
slog.ErrorContext(ctx, "invalid pipeline config found", "id", id, "error", err)
return nil, err
}
if err := gettablePipeline.ParseFilter(); err != nil {
slog.ErrorContext(ctx, "invalid pipeline filter found", "id", id, errors.Attr(err))
slog.ErrorContext(ctx, "invalid pipeline filter found", "id", id, "error", err)
return nil, err
}
return &gettablePipeline, nil

View File

@@ -5,16 +5,11 @@ import (
"encoding/json"
"errors"
"sort"
"strings"
"time"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"log/slog"
"golang.org/x/sync/errgroup"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
"github.com/SigNoz/signoz/pkg/query-service/model"
@@ -23,6 +18,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/sync/errgroup"
)
type SummaryService struct {
@@ -177,14 +173,14 @@ func (receiver *SummaryService) GetMetricsSummary(ctx context.Context, orgID val
if data != nil {
jsonData, err := json.Marshal(data)
if err != nil {
slog.Error("error marshalling data", signozerrors.Attr(err))
slog.Error("error marshalling data", "error", err)
return &model.ApiError{Typ: "MarshallingErr", Err: err}
}
var dashboards map[string][]metrics_explorer.Dashboard
err = json.Unmarshal(jsonData, &dashboards)
if err != nil {
slog.Error("error unmarshalling data", signozerrors.Attr(err))
slog.Error("error unmarshalling data", "error", err)
return &model.ApiError{Typ: "UnMarshallingErr", Err: err}
}
if _, ok := dashboards[metricName]; ok {
@@ -354,12 +350,12 @@ func (receiver *SummaryService) GetRelatedMetrics(ctx context.Context, params *m
if names != nil {
jsonData, err := json.Marshal(names)
if err != nil {
slog.Error("error marshalling dashboard data", signozerrors.Attr(err))
slog.Error("error marshalling dashboard data", "error", err)
return &model.ApiError{Typ: "MarshallingErr", Err: err}
}
err = json.Unmarshal(jsonData, &dashboardsRelatedData)
if err != nil {
slog.Error("error unmarshalling dashboard data", signozerrors.Attr(err))
slog.Error("error unmarshalling dashboard data", "error", err)
return &model.ApiError{Typ: "UnMarshallingErr", Err: err}
}
}

View File

@@ -5,13 +5,12 @@ import (
"crypto/sha256"
"log/slog"
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
"go.opentelemetry.io/collector/confmap"
"github.com/SigNoz/signoz/pkg/errors"
model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp/otelconfig"
"github.com/knadh/koanf/parsers/yaml"
"github.com/open-telemetry/opamp-go/protobufs"
"go.opentelemetry.io/collector/confmap"
)
var (
@@ -54,7 +53,7 @@ func UpsertControlProcessors(ctx context.Context, signal string,
for _, agent := range agents {
agenthash, err := addIngestionControlToAgent(agent, signal, processors, false)
if err != nil {
slog.Error("failed to push ingestion rules config to agent", "agent_id", agent.AgentID, errors.Attr(err))
slog.Error("failed to push ingestion rules config to agent", "agent_id", agent.AgentID, "error", err)
continue
}
@@ -83,7 +82,7 @@ func addIngestionControlToAgent(agent *model.Agent, signal string, processors ma
// add ingestion control spec
err = makeIngestionControlSpec(agentConf, Signal(signal), processors)
if err != nil {
slog.Error("failed to prepare ingestion control processors for agent", "agent_id", agent.AgentID, errors.Attr(err))
slog.Error("failed to prepare ingestion control processors for agent", "agent_id", agent.AgentID, "error", err)
return confHash, err
}
@@ -134,7 +133,7 @@ func makeIngestionControlSpec(agentConf *confmap.Conf, signal Signal, processors
// merge tracesPipelinePlan with current pipeline
mergedPipeline, err := buildPipeline(signal, currentPipeline)
if err != nil {
slog.Error("failed to build pipeline", "signal", string(signal), errors.Attr(err))
slog.Error("failed to build pipeline", "signal", string(signal), "error", err)
return err
}

View File

@@ -8,12 +8,10 @@ import (
"sync"
"time"
"google.golang.org/protobuf/proto"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/opamptypes"
"github.com/SigNoz/signoz/pkg/valuer"
"google.golang.org/protobuf/proto"
"github.com/open-telemetry/opamp-go/protobufs"
opampTypes "github.com/open-telemetry/opamp-go/server/types"
@@ -86,7 +84,7 @@ func (agent *Agent) KeepOnlyLast50Agents(ctx context.Context) {
Limit(50)).
Exec(ctx)
if err != nil {
agent.logger.Error("failed to delete old agents", errors.Attr(err))
agent.logger.Error("failed to delete old agents", "error", err)
}
}
@@ -315,7 +313,7 @@ func (agent *Agent) processStatusUpdate(
func (agent *Agent) updateRemoteConfig(configProvider AgentConfigProvider) bool {
recommendedConfig, confId, err := configProvider.RecommendAgentConfig(agent.OrgID, []byte(agent.Config))
if err != nil {
agent.logger.Error("could not generate config recommendation for agent", "agent_id", agent.AgentID, errors.Attr(err))
agent.logger.Error("could not generate config recommendation for agent", "agent_id", agent.AgentID, "error", err)
return false
}

View File

@@ -6,14 +6,12 @@ import (
"net/http"
"time"
"github.com/open-telemetry/opamp-go/protobufs"
"github.com/open-telemetry/opamp-go/server"
"github.com/open-telemetry/opamp-go/server/types"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/instrumentation"
model "github.com/SigNoz/signoz/pkg/query-service/app/opamp/model"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/open-telemetry/opamp-go/protobufs"
"github.com/open-telemetry/opamp-go/server"
"github.com/open-telemetry/opamp-go/server/types"
)
var opAmpServer *Server
@@ -74,7 +72,7 @@ func (srv *Server) Start(listener string) error {
err := srv.agents.RecommendLatestConfigToAll(srv.agentConfigProvider)
if err != nil {
srv.logger.Error(
"could not roll out latest config recommendation to connected agents", errors.Attr(err),
"could not roll out latest config recommendation to connected agents", "error", err,
)
}
})
@@ -117,7 +115,7 @@ func (srv *Server) OnMessage(ctx context.Context, conn types.Connection, msg *pr
// agents sends the effective config when we processStatusUpdate.
agent, created, err := srv.agents.FindOrCreateAgent(agentID.String(), conn, orgID)
if err != nil {
srv.logger.Error("failed to find or create agent", "agent_id", agentID.String(), errors.Attr(err))
srv.logger.Error("failed to find or create agent", "agent_id", agentID.String(), "error", err)
// Return error response according to OpAMP protocol
return &protobufs.ServerToAgent{

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"sort"
@@ -15,16 +14,12 @@ import (
"text/template"
"time"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/thirdpartyapitypes"
"log/slog"
"github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/kafka"
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
"log/slog"
"github.com/gorilla/mux"
promModel "github.com/prometheus/common/model"
@@ -746,7 +741,7 @@ func chTransformQuery(query string, variables map[string]interface{}) {
transformer := chVariables.NewQueryTransformer(query, varsForTransform)
transformedQuery, err := transformer.Transform()
if err != nil {
slog.Warn("failed to transform clickhouse query", "query", query, signozerrors.Attr(err))
slog.Warn("failed to transform clickhouse query", "query", query, "error", err)
}
slog.Info("transformed clickhouse query", "transformed_query", transformedQuery, "original_query", query)
}

View File

@@ -3,12 +3,10 @@ package v2
import (
"context"
"fmt"
"github.com/prometheus/prometheus/promql/parser"
"strings"
"sync"
"github.com/prometheus/prometheus/promql/parser"
"github.com/SigNoz/signoz/pkg/errors"
logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
metricsV4 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4"
@@ -287,7 +285,7 @@ func (q *querier) ValidateMetricNames(ctx context.Context, query *v3.CompositeQu
for _, query := range query.PromQueries {
expr, err := parser.ParseExpr(query.Query)
if err != nil {
q.logger.DebugContext(ctx, "error parsing promql expression", "query", query.Query, errors.Attr(err))
q.logger.DebugContext(ctx, "error parsing promql expression", "query", query.Query, "error", err)
continue
}
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
@@ -303,7 +301,7 @@ func (q *querier) ValidateMetricNames(ctx context.Context, query *v3.CompositeQu
}
metrics, err := q.reader.GetNormalizedStatus(ctx, orgID, metricNames)
if err != nil {
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", errors.Attr(err))
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", "error", err)
return
}
for metricName, metricPresent := range metrics {
@@ -321,7 +319,7 @@ func (q *querier) ValidateMetricNames(ctx context.Context, query *v3.CompositeQu
}
metrics, err := q.reader.GetNormalizedStatus(ctx, orgID, metricNames)
if err != nil {
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", errors.Attr(err))
q.logger.DebugContext(ctx, "error getting corresponding normalized metrics", "error", err)
return
}
for metricName, metricPresent := range metrics {

View File

@@ -9,7 +9,6 @@ import (
"slices"
"github.com/SigNoz/signoz/pkg/cache/memorycache"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
@@ -17,9 +16,6 @@ import (
"github.com/gorilla/handlers"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/cache"
"github.com/SigNoz/signoz/pkg/http/middleware"
@@ -39,16 +35,16 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/web"
"log/slog"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"github.com/rs/cors"
"github.com/soheilhy/cmux"
"github.com/SigNoz/signoz/pkg/query-service/constants"
"github.com/SigNoz/signoz/pkg/query-service/healthcheck"
"github.com/SigNoz/signoz/pkg/query-service/rules"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
"go.opentelemetry.io/otel/propagation"
"log/slog"
)
// Server runs HTTP, Mux and a grpc server
@@ -289,7 +285,7 @@ func (s *Server) Start(ctx context.Context) error {
case nil, http.ErrServerClosed, cmux.ErrListenerClosed:
// normal exit, nothing to do
default:
slog.Error("Could not start HTTP server", errors.Attr(err))
slog.Error("Could not start HTTP server", "error", err)
}
s.unavailableChannel <- healthcheck.Unavailable
}()
@@ -299,7 +295,7 @@ func (s *Server) Start(ctx context.Context) error {
err = http.ListenAndServe(constants.DebugHttpPort, nil)
if err != nil {
slog.Error("Could not start pprof server", errors.Attr(err))
slog.Error("Could not start pprof server", "error", err)
}
}()
@@ -307,7 +303,7 @@ func (s *Server) Start(ctx context.Context) error {
slog.Info("Starting OpAmp Websocket server", "addr", constants.OpAmpWsEndpoint)
err := s.opampServer.Start(constants.OpAmpWsEndpoint)
if err != nil {
slog.Error("opamp ws server failed to start", errors.Attr(err))
slog.Error("opamp ws server failed to start", "error", err)
s.unavailableChannel <- healthcheck.Unavailable
}
}()

View File

@@ -5,7 +5,6 @@ import (
"io"
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/authtypes"
@@ -24,13 +23,13 @@ func (aH *APIHandler) FilterKeysSuggestion(w http.ResponseWriter, r *http.Reques
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiError := explorer.ParseFilterKeySuggestions(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing summary filter keys request", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing summary filter keys request", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
keys, apiError := aH.SummaryService.FilterKeys(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting filter keys", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting filter keys", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -54,14 +53,14 @@ func (aH *APIHandler) FilterValuesSuggestion(w http.ResponseWriter, r *http.Requ
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiError := explorer.ParseFilterValueSuggestions(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing summary filter values request", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing summary filter values request", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
values, apiError := aH.SummaryService.FilterValues(ctx, orgID, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting filter values", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting filter values", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -84,7 +83,7 @@ func (aH *APIHandler) GetMetricsDetails(w http.ResponseWriter, r *http.Request)
metricName := mux.Vars(r)["metric_name"]
metricsDetail, apiError := aH.SummaryService.GetMetricsSummary(ctx, orgID, metricName)
if apiError != nil {
slog.ErrorContext(ctx, "error getting metrics summary", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting metrics summary", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -108,14 +107,14 @@ func (aH *APIHandler) ListMetrics(w http.ResponseWriter, r *http.Request) {
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiErr := explorer.ParseSummaryListMetricsParams(r)
if apiErr != nil {
slog.ErrorContext(ctx, "error parsing metric list metric summary api request", errors.Attr(apiErr.Err))
slog.ErrorContext(ctx, "error parsing metric list metric summary api request", "error", apiErr.Err)
RespondError(w, model.BadRequest(apiErr), nil)
return
}
slmr, apiErr := aH.SummaryService.ListMetricsWithSummary(ctx, orgID, params)
if apiErr != nil {
slog.ErrorContext(ctx, "error in getting list metrics summary", errors.Attr(apiErr.Err))
slog.ErrorContext(ctx, "error in getting list metrics summary", "error", apiErr.Err)
RespondError(w, apiErr, nil)
return
}
@@ -128,13 +127,13 @@ func (aH *APIHandler) GetTreeMap(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
params, apiError := explorer.ParseTreeMapMetricsParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing tree map metric params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing tree map metric params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
result, apiError := aH.SummaryService.GetMetricsTreemap(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting tree map data", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting tree map data", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -148,13 +147,13 @@ func (aH *APIHandler) GetRelatedMetrics(w http.ResponseWriter, r *http.Request)
ctx := r.Context()
params, apiError := explorer.ParseRelatedMetricsParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing related metric params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing related metric params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
result, apiError := aH.SummaryService.GetRelatedMetrics(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting related metrics", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting related metrics", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -168,13 +167,13 @@ func (aH *APIHandler) GetInspectMetricsData(w http.ResponseWriter, r *http.Reque
ctx := r.Context()
params, apiError := explorer.ParseInspectMetricsParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing inspect metric params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing inspect metric params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
result, apiError := aH.SummaryService.GetInspectMetrics(ctx, params)
if apiError != nil {
slog.ErrorContext(ctx, "error getting inspect metrics data", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error getting inspect metrics data", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
@@ -199,13 +198,13 @@ func (aH *APIHandler) UpdateMetricsMetadata(w http.ResponseWriter, r *http.Reque
r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
params, apiError := explorer.ParseUpdateMetricsMetadataParams(r)
if apiError != nil {
slog.ErrorContext(ctx, "error parsing update metrics metadata params", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error parsing update metrics metadata params", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}
apiError = aH.SummaryService.UpdateMetricsMetadata(ctx, orgID, params)
if apiError != nil {
slog.ErrorContext(ctx, "error updating metrics metadata", errors.Attr(apiError.Err))
slog.ErrorContext(ctx, "error updating metrics metadata", "error", apiError.Err)
RespondError(w, apiError, nil)
return
}

View File

@@ -5,7 +5,6 @@ import (
"log/slog"
"strconv"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)
@@ -54,7 +53,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
break
}
if err != nil {
slog.Error("error during breadth first search", signozerrors.Attr(err))
slog.Error("error during breadth first search", "error", err)
return nil, err
}
}

View File

@@ -4,7 +4,6 @@ import (
"log/slog"
"strconv"
"github.com/SigNoz/signoz/pkg/errors"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils"
)
@@ -69,7 +68,7 @@ func TraceIdFilterUsedWithEqual(params *v3.QueryRangeParamsV3) (bool, []string)
val := item.Value
val, err = utils.ValidateAndCastValue(val, item.Key.DataType)
if err != nil {
slog.Error("invalid value for key", "key", item.Key.Key, errors.Attr(err))
slog.Error("invalid value for key", "key", item.Key.Key, "error", err)
return false, []string{}
}
if val != nil {

View File

@@ -11,10 +11,8 @@ import (
"log/slog"
"github.com/pkg/errors"
signozerrors "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
@@ -940,7 +938,7 @@ func (b *BuilderQuery) SetShiftByFromFunc() {
} else if shift, ok := function.Args[0].(string); ok {
shiftBy, err := strconv.ParseFloat(shift, 64)
if err != nil {
slog.Error("failed to parse time shift by", "shift", shift, signozerrors.Attr(err))
slog.Error("failed to parse time shift by", "shift", shift, "error", err)
}
timeShiftBy = int64(shiftBy)
}

View File

@@ -4,8 +4,6 @@ import (
"log/slog"
"github.com/SigNoz/govaluate"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)
@@ -58,12 +56,12 @@ func PostProcessResult(result []*v3.Result, queryRangeParams *v3.QueryRangeParam
expression, err := govaluate.NewEvaluableExpressionWithFunctions(query.Expression, EvalFuncs())
// This shouldn't happen here, because it should have been caught earlier in validation
if err != nil {
slog.Error("error in expression", errors.Attr(err))
slog.Error("error in expression", "error", err)
return nil, err
}
formulaResult, err := processResults(result, expression, canDefaultZero)
if err != nil {
slog.Error("error in expression", errors.Attr(err))
slog.Error("error in expression", "error", err)
return nil, err
}
formulaResult.QueryName = query.QueryName

Some files were not shown because too many files have changed in this diff Show More