Compare commits

..

8 Commits

Author            SHA1        Message                                                                         Date
Srikanth Chekuri  b1f1cd5b54  Merge branch 'main' into fix-issues                                             2026-02-16 15:11:01 +05:30
srikanthccv       2fa0c94c92  Merge branch 'fix-issues' of github.com:SigNoz/signoz into fix-issues          2026-02-16 14:45:35 +05:30
srikanthccv       312acf16fb  chore: update validation.go                                                     2026-02-16 14:45:13 +05:30
Srikanth Chekuri  e13ea23a5d  Merge branch 'main' into fix-issues                                             2026-02-16 12:25:47 +05:30
srikanthccv       4b117c85e6  chore: update return                                                            2026-02-15 12:44:09 +05:30
srikanthccv       3e1a48acf9  chore: update tests                                                             2026-02-15 12:38:48 +05:30
Srikanth Chekuri  6d282dfe68  Merge branch 'main' into fix-issues                                             2026-02-14 22:42:14 +05:30
srikanthccv       e3c04b378a  fix: update rate/increase query and address several issues in builder queries  2026-02-14 11:02:09 +05:30
55 changed files with 1544 additions and 1969 deletions

View File

@@ -176,25 +176,6 @@ Wir haben Benchmarks veröffentlicht, die Loki mit SigNoz vergleichen. Schauen S
We ❤️ contributions to the project, big or small. Please read [CONTRIBUTING.md](CONTRIBUTING.md) first before you start contributing to SigNoz.
Not sure how to get started? Just ping us on the #contributing channel in our [slack community](https://signoz.io/slack)
### Our project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
## Documentation

View File

@@ -221,34 +221,6 @@ We ❤️ contributions big or small. Please read [CONTRIBUTING.md](CONTRIBUTING
Not sure how to get started? Just ping us on `#contributing` in our [slack community](https://signoz.io/slack)
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
- [Shivanshu Raj Shrivastava](https://github.com/shivanshuraj1333)
- [Ekansh Gupta](https://github.com/eKuG)
- [Aniket Agarwal](https://github.com/aniketio-ctrl)
#### Frontend
- [Yunus M](https://github.com/YounixM)
- [Vikrant Gupta](https://github.com/vikrantgupta25)
- [Sagar Rajput](https://github.com/SagarRajput-7)
- [Shaheer Kochai](https://github.com/ahmadshaheer)
- [Amlan Kumar Nandy](https://github.com/amlannandy)
- [Sahil Khan](https://github.com/sawhil)
- [Aditya Singh](https://github.com/aks07)
- [Abhi Kumar](https://github.com/ahrefabhi)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
- [Vibhu Pandey](https://github.com/therealpandey)
<br /><br />

View File

@@ -187,25 +187,6 @@ Jaeger 仅仅是一个分布式追踪系统。 但是 SigNoz 可以提供 metric
Not sure how to get started? Just reach out to us on the `#contributing` channel in our [slack community](https://signoz.io/slack).
### Project maintainers
#### Backend
- [Ankit Nayan](https://github.com/ankitnayan)
- [Nityananda Gohain](https://github.com/nityanandagohain)
- [Srikanth Chekuri](https://github.com/srikanthccv)
- [Vishal Sharma](https://github.com/makeavish)
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
<br /><br />
## Documentation

View File

@@ -294,7 +294,6 @@ flagger:
config:
boolean:
use_span_metrics: true
interpolation_enabled: false
kafka_span_eval: false
string:
float:
@@ -309,14 +308,3 @@ user:
allow_self: true
# The duration within which a user can reset their password.
max_token_lifetime: 6h
root:
# Whether to enable the root user. When enabled, a root user is provisioned
# on startup using the email and password below. The root user cannot be
# deleted, updated, or have their password changed through the UI.
enabled: false
# The email address of the root user.
email: ""
# The password of the root user. Must meet password requirements.
password: ""
# The name of the organization to create or look up for the root user.
org_name: default

View File

@@ -4678,8 +4678,6 @@ components:
type: string
id:
type: string
isRoot:
type: boolean
orgId:
type: string
role:

View File

@@ -45,7 +45,7 @@ type APIHandler struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz, config signoz.Config) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler, error) {
baseHandler, err := baseapp.NewAPIHandler(baseapp.APIHandlerOpts{
Reader: opts.DataConnector,
RuleManager: opts.RulesManager,
@@ -58,7 +58,7 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz, config signoz.
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
}, config)
})
if err != nil {
return nil, err

View File

@@ -175,7 +175,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
GlobalConfig: config.Global,
}
apiHandler, err := api.NewAPIHandler(apiOpts, signoz, config)
apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
if err != nil {
return nil, err
}

View File

@@ -1542,10 +1542,6 @@ export interface TypesUserDTO {
* @type string
*/
id?: string;
/**
* @type boolean
*/
isRoot?: boolean;
/**
* @type string
*/

View File

@@ -3,9 +3,8 @@ package flagger
import "github.com/SigNoz/signoz/pkg/types/featuretypes"
var (
FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
FeatureInterpolationEnabled = featuretypes.MustNewName("interpolation_enabled")
FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
)
func MustNewRegistry() featuretypes.Registry {
@@ -18,14 +17,6 @@ func MustNewRegistry() featuretypes.Registry {
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureInterpolationEnabled,
Kind: featuretypes.KindBoolean,
Stage: featuretypes.StageExperimental,
Description: "Controls whether to enable interpolation",
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureKafkaSpanEval,
Kind: featuretypes.KindBoolean,

View File

@@ -30,7 +30,3 @@ func (module *getter) ListByOwnedKeyRange(ctx context.Context) ([]*types.Organiz
return module.store.ListByKeyRange(ctx, start, end)
}
func (module *getter) GetByName(ctx context.Context, name string) (*types.Organization, error) {
return module.store.GetByName(ctx, name)
}

View File

@@ -47,22 +47,6 @@ func (store *store) Get(ctx context.Context, id valuer.UUID) (*types.Organizatio
return organization, nil
}
func (store *store) GetByName(ctx context.Context, name string) (*types.Organization, error) {
organization := new(types.Organization)
err := store.
sqlstore.
BunDB().
NewSelect().
Model(organization).
Where("name = ?", name).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrOrganizationNotFound, "organization with name %s does not exist", name)
}
return organization, nil
}
func (store *store) GetAll(ctx context.Context) ([]*types.Organization, error) {
organizations := make([]*types.Organization, 0)
err := store.

View File

@@ -14,9 +14,6 @@ type Getter interface {
// ListByOwnedKeyRange gets all the organizations owned by the instance
ListByOwnedKeyRange(context.Context) ([]*types.Organization, error)
// Gets the organization by name
GetByName(context.Context, string) (*types.Organization, error)
}
type Setter interface {

View File

@@ -151,10 +151,6 @@ func (module *module) CreateCallbackAuthNSession(ctx context.Context, authNProvi
return "", err
}
if err := user.ErrIfRoot(); err != nil {
return "", errors.WithAdditionalf(err, "root user can only authenticate via password")
}
token, err := module.tokenizer.CreateToken(ctx, authtypes.NewIdentity(user.ID, user.OrgID, user.Email, user.Role), map[string]string{})
if err != nil {
return "", err

View File

@@ -5,22 +5,11 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Config struct {
Password PasswordConfig `mapstructure:"password"`
Root RootConfig `mapstructure:"root"`
}
type RootConfig struct {
Enabled bool `mapstructure:"enabled"`
Email valuer.Email `mapstructure:"email"`
Password string `mapstructure:"password"`
OrgName string `mapstructure:"org_name"`
}
type PasswordConfig struct {
Reset ResetConfig `mapstructure:"reset"`
}
@@ -42,10 +31,6 @@ func newConfig() factory.Config {
MaxTokenLifetime: 6 * time.Hour,
},
},
Root: RootConfig{
Enabled: false,
OrgName: "default",
},
}
}
@@ -54,17 +39,5 @@ func (c Config) Validate() error {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::reset::max_token_lifetime must be positive")
}
if c.Root.Enabled {
if c.Root.Email.IsZero() {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::email is required when root user is enabled")
}
if c.Root.Password == "" {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::password is required when root user is enabled")
}
if !types.IsPasswordValid(c.Root.Password) {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::password does not meet password requirements")
}
}
return nil
}

View File

@@ -16,10 +16,6 @@ func NewGetter(store types.UserStore) user.Getter {
return &getter{store: store}
}
func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
return module.store.GetRootUserByOrgID(ctx, orgID)
}
func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.User, error) {
users, err := module.store.ListUsersByOrgID(ctx, orgID)
if err != nil {

View File

@@ -103,12 +103,6 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
return nil, err
}
if existingUser != nil {
if err := existingUser.ErrIfRoot(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot send invite to root user")
}
}
if existingUser != nil {
return nil, errors.New(errors.TypeAlreadyExists, errors.CodeAlreadyExists, "User already exists with the same email")
}
@@ -208,21 +202,27 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
return nil, err
}
if err := existingUser.ErrIfRoot(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot update root user")
}
requestor, err := m.store.GetUser(ctx, valuer.MustNewUUID(updatedBy))
if err != nil {
return nil, err
}
if user.Role != "" && user.Role != existingUser.Role && requestor.Role != types.RoleAdmin {
// only displayName, role can be updated
if user.DisplayName == "" {
user.DisplayName = existingUser.DisplayName
}
if user.Role == "" {
user.Role = existingUser.Role
}
if user.Role != existingUser.Role && requestor.Role != types.RoleAdmin {
return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "only admins can change roles")
}
// Make sure that the request is not demoting the last admin user.
if user.Role != "" && user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin {
// Make sure that the request is not demoting the last admin user.
// Also, an admin user can only change their own role or another user's role.
if user.Role != existingUser.Role && existingUser.Role == types.RoleAdmin {
adminUsers, err := m.store.GetUsersByRoleAndOrgID(ctx, types.RoleAdmin, orgID)
if err != nil {
return nil, err
@@ -233,7 +233,7 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
}
}
if user.Role != "" && user.Role != existingUser.Role {
if user.Role != existingUser.Role {
err = m.authz.ModifyGrant(ctx,
orgID,
roletypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role),
@@ -245,28 +245,23 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
}
}
existingUser.Update(user.DisplayName, user.Role)
if err := m.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
user.UpdatedAt = time.Now()
updatedUser, err := m.store.UpdateUser(ctx, orgID, id, user)
if err != nil {
return nil, err
}
return existingUser, nil
}
traits := types.NewTraitsFromUser(updatedUser)
m.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traits)
func (module *Module) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
if err := module.store.UpdateUser(ctx, orgID, user); err != nil {
return err
traits["updated_by"] = updatedBy
m.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Updated", traits)
if err := m.tokenizer.DeleteIdentity(ctx, valuer.MustNewUUID(id)); err != nil {
return nil, err
}
traits := types.NewTraitsFromUser(user)
module.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traits)
module.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Updated", traits)
if err := module.tokenizer.DeleteIdentity(ctx, user.ID); err != nil {
return err
}
return nil
return updatedUser, nil
}
func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error {
@@ -275,10 +270,6 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot delete root user")
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
}
@@ -373,10 +364,6 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot reset password for root user")
}
token, err := module.GetOrCreateResetPasswordToken(ctx, user.ID)
if err != nil {
module.settings.Logger().ErrorContext(ctx, "failed to create reset password token", "error", err)
@@ -420,15 +407,6 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
return err
}
user, err := module.store.GetUser(ctx, valuer.MustNewUUID(password.UserID))
if err != nil {
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot reset password for root user")
}
if err := password.Update(passwd); err != nil {
return err
}
@@ -437,15 +415,6 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
}
func (module *Module) UpdatePassword(ctx context.Context, userID valuer.UUID, oldpasswd string, passwd string) error {
user, err := module.store.GetUser(ctx, userID)
if err != nil {
return err
}
if err := user.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot change password for root user")
}
password, err := module.store.GetPasswordByUserID(ctx, userID)
if err != nil {
return err
@@ -507,7 +476,7 @@ func (m *Module) RevokeAPIKey(ctx context.Context, id, removedByUserID valuer.UU
}
func (module *Module) CreateFirstUser(ctx context.Context, organization *types.Organization, name string, email valuer.Email, passwd string) (*types.User, error) {
user, err := types.NewRootUser(name, email, organization.ID)
user, err := types.NewUser(name, email, types.RoleAdmin, organization.ID)
if err != nil {
return nil, err
}
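The reworked UpdateUser above fills in missing fields from the existing record before validating the role change. Below is a minimal standalone sketch of that merge-then-validate flow with simplified, hypothetical types; it is not the repository's actual API.

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the real user and role types.
type Role string

const RoleAdmin Role = "ADMIN"

type User struct {
	DisplayName string
	Role        Role
}

// mergeUpdate fills empty fields of the partial update from the existing
// user, then enforces that only admins may change roles.
func mergeUpdate(existing, update User, requestorRole Role) (User, error) {
	if update.DisplayName == "" {
		update.DisplayName = existing.DisplayName
	}
	if update.Role == "" {
		update.Role = existing.Role
	}
	if update.Role != existing.Role && requestorRole != RoleAdmin {
		return User{}, errors.New("only admins can change roles")
	}
	return update, nil
}

func main() {
	existing := User{DisplayName: "Alice", Role: "EDITOR"}
	// A non-admin tries to promote Alice: rejected.
	if _, err := mergeUpdate(existing, User{Role: RoleAdmin}, "VIEWER"); err != nil {
		fmt.Println("rejected:", err)
	}
	// An admin renames Alice without touching the role: accepted.
	updated, _ := mergeUpdate(existing, User{DisplayName: "Alice B."}, RoleAdmin)
	fmt.Printf("updated: %+v\n", updated)
}
```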

View File

@@ -1,187 +0,0 @@
package impluser
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type service struct {
settings factory.ScopedProviderSettings
store types.UserStore
module user.Module
orgGetter organization.Getter
authz authz.AuthZ
config user.RootConfig
stopC chan struct{}
}
func NewService(
providerSettings factory.ProviderSettings,
store types.UserStore,
module user.Module,
orgGetter organization.Getter,
authz authz.AuthZ,
config user.RootConfig,
) user.Service {
return &service{
settings: factory.NewScopedProviderSettings(providerSettings, "go.signoz.io/pkg/modules/user"),
store: store,
module: module,
orgGetter: orgGetter,
authz: authz,
config: config,
stopC: make(chan struct{}),
}
}
func (s *service) Start(ctx context.Context) error {
if !s.config.Enabled {
<-s.stopC
return nil
}
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
err := s.reconcile(ctx)
if err == nil {
s.settings.Logger().InfoContext(ctx, "root user reconciliation completed successfully")
<-s.stopC
return nil
}
s.settings.Logger().WarnContext(ctx, "root user reconciliation failed, retrying", "error", err)
select {
case <-s.stopC:
return nil
case <-ticker.C:
continue
}
}
}
func (s *service) Stop(ctx context.Context) error {
close(s.stopC)
return nil
}
func (s *service) reconcile(ctx context.Context) error {
org, err := s.orgGetter.GetByName(ctx, s.config.OrgName)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
newOrg := types.NewOrganizationWithName(s.config.OrgName)
_, err := s.module.CreateFirstUser(ctx, newOrg, s.config.Email.String(), s.config.Email, s.config.Password)
return err
}
return err
}
return s.reconcileRootUser(ctx, org.ID)
}
func (s *service) reconcileRootUser(ctx context.Context, orgID valuer.UUID) error {
existingRoot, err := s.store.GetRootUserByOrgID(ctx, orgID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existingRoot == nil {
return s.createOrPromoteRootUser(ctx, orgID)
}
return s.updateExistingRootUser(ctx, orgID, existingRoot)
}
func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID) error {
existingUser, err := s.store.GetUserByEmailAndOrgID(ctx, s.config.Email, orgID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existingUser != nil {
oldRole := existingUser.Role
existingUser.PromoteToRoot()
if err := s.module.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
return err
}
if oldRole != types.RoleAdmin {
if err := s.authz.ModifyGrant(ctx,
orgID,
roletypes.MustGetSigNozManagedRoleFromExistingRole(oldRole),
roletypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin),
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), orgID, nil),
); err != nil {
return err
}
}
return s.setPassword(ctx, existingUser.ID)
}
// Create new root user
newUser, err := types.NewRootUser(s.config.Email.String(), s.config.Email, orgID)
if err != nil {
return err
}
factorPassword, err := types.NewFactorPassword(s.config.Password, newUser.ID.StringValue())
if err != nil {
return err
}
return s.module.CreateUser(ctx, newUser, user.WithFactorPassword(factorPassword))
}
func (s *service) updateExistingRootUser(ctx context.Context, orgID valuer.UUID, existingRoot *types.User) error {
existingRoot.PromoteToRoot()
if existingRoot.Email != s.config.Email {
existingRoot.UpdateEmail(s.config.Email)
if err := s.module.UpdateAnyUser(ctx, orgID, existingRoot); err != nil {
return err
}
}
return s.setPassword(ctx, existingRoot.ID)
}
func (s *service) setPassword(ctx context.Context, userID valuer.UUID) error {
password, err := s.store.GetPasswordByUserID(ctx, userID)
if err != nil {
if !errors.Ast(err, errors.TypeNotFound) {
return err
}
factorPassword, err := types.NewFactorPassword(s.config.Password, userID.StringValue())
if err != nil {
return err
}
return s.store.CreatePassword(ctx, factorPassword)
}
if !password.Equals(s.config.Password) {
if err := password.Update(s.config.Password); err != nil {
return err
}
return s.store.UpdatePassword(ctx, password)
}
return nil
}
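The removed Start loop above retried reconciliation on a ticker until it succeeded or the service was stopped. A minimal, generic sketch of that retry-until-stopped pattern, with hypothetical names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runUntilSuccess retries work on every tick until it succeeds or stop is closed.
func runUntilSuccess(ctx context.Context, work func(context.Context) error, stop <-chan struct{}, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		if err := work(ctx); err == nil {
			fmt.Println("reconciliation completed successfully")
			<-stop // block until the service is asked to stop
			return
		}
		select {
		case <-stop:
			return
		case <-ticker.C:
		}
	}
}

func main() {
	stop := make(chan struct{})
	attempts := 0
	go func() {
		time.Sleep(300 * time.Millisecond)
		close(stop)
	}()
	runUntilSuccess(context.Background(), func(context.Context) error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("attempt %d failed", attempts)
		}
		return nil
	}, stop, 50*time.Millisecond)
}
```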

View File

@@ -210,24 +210,20 @@ func (store *store) GetUsersByRoleAndOrgID(ctx context.Context, role types.Role,
return users, nil
}
func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
_, err := store.
sqlstore.
BunDBCtx(ctx).
NewUpdate().
func (store *store) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.User) (*types.User, error) {
user.UpdatedAt = time.Now()
_, err := store.sqlstore.BunDB().NewUpdate().
Model(user).
Column("display_name").
Column("email").
Column("role").
Column("is_root").
Column("updated_at").
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("id = ?", user.ID).
Exec(ctx)
if err != nil {
return store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "user does not exist in org: %s", orgID)
return nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "user with id: %s does not exist in org: %s", id, orgID)
}
return nil
return user, nil
}
func (store *store) ListUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.GettableUser, error) {
@@ -606,22 +602,6 @@ func (store *store) RunInTx(ctx context.Context, cb func(ctx context.Context) er
})
}
func (store *store) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
user := new(types.User)
err := store.
sqlstore.
BunDBCtx(ctx).
NewSelect().
Model(user).
Where("org_id = ?", orgID).
Where("is_root = ?", true).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, types.ErrCodeUserNotFound, "root user for org %s not found", orgID)
}
return user, nil
}
func (store *store) ListUsersByEmailAndOrgIDs(ctx context.Context, email valuer.Email, orgIDs []valuer.UUID) ([]*types.User, error) {
users := []*types.User{}
err := store.

View File

@@ -1,7 +0,0 @@
package user
import "github.com/SigNoz/signoz/pkg/factory"
type Service interface {
factory.Service
}

View File

@@ -34,9 +34,6 @@ type Module interface {
ForgotPassword(ctx context.Context, orgID valuer.UUID, email valuer.Email, frontendBaseURL string) error
UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.User, updatedBy string) (*types.User, error)
// UpdateAnyUser updates a user and persists the changes to the database along with the analytics and identity deletion.
UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error
DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error
// invite
@@ -57,9 +54,6 @@ type Module interface {
}
type Getter interface {
// Get root user by org id.
GetRootUserByOrgID(context.Context, valuer.UUID) (*types.User, error)
// Get gets the users based on the given id
ListByOrgID(context.Context, valuer.UUID) ([]*types.User, error)

View File

@@ -183,7 +183,7 @@ type APIHandlerOpts struct {
}
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, error) {
func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
querierOpts := querier.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Signoz.Cache,
@@ -270,11 +270,6 @@ func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, erro
}
}
// If the root user is enabled, the setup is complete
if config.User.Root.Enabled {
aH.SetupCompleted = true
}
aH.Upgrader = &websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true

View File

@@ -135,7 +135,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
}, config)
})
if err != nil {
return nil, err
}

View File

@@ -205,7 +205,7 @@ func AdjustKey(key *telemetrytypes.TelemetryFieldKey, keys map[string][]*telemet
key.Indexes = matchingKey.Indexes
key.Materialized = matchingKey.Materialized
key.JSONPlan = matchingKey.JSONPlan
return actions
} else {
// multiple matching keys, set materialized only if all the keys are materialized

View File

@@ -483,6 +483,22 @@ func (v *filterExpressionVisitor) VisitComparison(ctx *grammar.ComparisonContext
value1 := v.Visit(values[0])
value2 := v.Visit(values[1])
switch value1.(type) {
case float64:
if _, ok := value2.(float64); !ok {
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected number for both operands", keys[0].Name))
return ""
}
case string:
if _, ok := value2.(string); !ok {
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: expected string for both operands", keys[0].Name))
return ""
}
default:
v.errors = append(v.errors, fmt.Sprintf("value type mismatch for key %s: operands must be number or string", keys[0].Name))
return ""
}
var conds []string
for _, key := range keys {
condition, err := v.conditionBuilder.ConditionFor(context.Background(), key, op, []any{value1, value2}, v.builder, v.startNs, v.endNs)
@@ -855,7 +871,7 @@ func (v *filterExpressionVisitor) VisitKey(ctx *grammar.KeyContext) any {
// 1. either user meant key ( this is already handled above in fieldKeysForName )
// 2. or user meant `attribute.key` we look up in the map for all possible field keys with name 'attribute.key'
// Note:
// Note:
// If user only wants to search `attribute.key`, then they have to use `attribute.attribute.key`
// If user only wants to search `key`, then they have to use `key`
// If user wants to search both, they can use `attribute.key` and we will resolve the ambiguity
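The comparison visitor above now rejects BETWEEN operands of mixed types. A minimal sketch of the same validation outside the visitor, with hypothetical names:

```go
package main

import (
	"errors"
	"fmt"
)

// validateBetweenOperands mirrors the idea of the added check: both operands
// must be numbers, or both must be strings.
func validateBetweenOperands(key string, value1, value2 any) error {
	switch value1.(type) {
	case float64:
		if _, ok := value2.(float64); !ok {
			return fmt.Errorf("value type mismatch for key %s: expected number for both operands", key)
		}
	case string:
		if _, ok := value2.(string); !ok {
			return fmt.Errorf("value type mismatch for key %s: expected string for both operands", key)
		}
	default:
		return errors.New("operands must be number or string")
	}
	return nil
}

func main() {
	fmt.Println(validateBetweenOperands("duration_nano", 100.0, 200.0)) // <nil>
	fmt.Println(validateBetweenOperands("duration_nano", 100.0, "200")) // mismatch error
}
```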

View File

@@ -375,13 +375,6 @@ func mergeAndEnsureBackwardCompatibility(ctx context.Context, logger *slog.Logge
config.Flagger.Config.Boolean[flagger.FeatureKafkaSpanEval.String()] = os.Getenv("KAFKA_SPAN_EVAL") == "true"
}
if os.Getenv("INTERPOLATION_ENABLED") != "" {
logger.WarnContext(ctx, "[Deprecated] env INTERPOLATION_ENABLED is deprecated and scheduled for removal. Please use SIGNOZ_FLAGGER_CONFIG_BOOLEAN_INTERPOLATION__ENABLED instead.")
if config.Flagger.Config.Boolean == nil {
config.Flagger.Config.Boolean = make(map[string]bool)
}
config.Flagger.Config.Boolean[flagger.FeatureInterpolationEnabled.String()] = os.Getenv("INTERPOLATION_ENABLED") == "true"
}
}
func (config Config) Collect(_ context.Context, _ valuer.UUID) (map[string]any, error) {

View File

@@ -167,7 +167,6 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewMigrateRbacToAuthzFactory(sqlstore),
sqlmigration.NewMigratePublicDashboardsFactory(sqlstore),
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
)
}

View File

@@ -389,8 +389,6 @@ func New(
// Initialize all modules
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard)
userService := impluser.NewService(providerSettings, impluser.NewStore(sqlstore, providerSettings), modules.User, orgGetter, authz, config.User.Root)
// Initialize all handlers for the modules
handlers := NewHandlers(modules, providerSettings, querier, licensing, global, flagger, gateway, telemetryMetadataStore, authz)
@@ -440,7 +438,6 @@ func New(
factory.NewNamedService(factory.MustNewName("statsreporter"), statsReporter),
factory.NewNamedService(factory.MustNewName("tokenizer"), tokenizer),
factory.NewNamedService(factory.MustNewName("authz"), authz),
factory.NewNamedService(factory.MustNewName("user"), userService),
)
if err != nil {
return nil, err

View File

@@ -1,80 +0,0 @@
package sqlmigration
import (
"context"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type addRootUser struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}
func NewAddRootUserFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_root_user"), func(ctx context.Context, providerSettings factory.ProviderSettings, config Config) (SQLMigration, error) {
return &addRootUser{
sqlstore: sqlstore,
sqlschema: sqlschema,
}, nil
})
}
func (migration *addRootUser) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
func (migration *addRootUser) Up(ctx context.Context, db *bun.DB) error {
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, false); err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, sqlschema.TableName("users"))
if err != nil {
return err
}
column := &sqlschema.Column{
Name: sqlschema.ColumnName("is_root"),
DataType: sqlschema.DataTypeBoolean,
Nullable: false,
}
sqls := migration.sqlschema.Operator().AddColumn(table, uniqueConstraints, column, false)
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}
if err := tx.Commit(); err != nil {
return err
}
if err := migration.sqlschema.ToggleFKEnforcement(ctx, db, true); err != nil {
return err
}
return nil
}
func (migration *addRootUser) Down(ctx context.Context, db *bun.DB) error {
return nil
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
@@ -73,7 +74,7 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
cteArgs [][]any
)
if b.metricsStatementBuilder.CanShortCircuitDelta(query) {
if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
// spatial_aggregation_cte directly for certain delta queries
if frag, args, err := b.buildTemporalAggDeltaFastPath(ctx, start, end, query, keys, variables); err != nil {
return nil, err
@@ -91,8 +92,9 @@ func (b *meterQueryStatementBuilder) buildPipelineStatement(
}
// spatial_aggregation_cte
frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
if frag != "" {
if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
return nil, err
} else if frag != "" {
cteFragments = append(cteFragments, frag)
cteArgs = append(cteArgs, args)
}
@@ -122,13 +124,16 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
for _, g := range query.GroupBy {
col, err := b.fm.ColumnExpressionFor(ctx, &g.TelemetryFieldKey, keys)
if err != nil {
return "", []any{}, err
return "", nil, err
}
sb.SelectMore(col)
}
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -150,7 +155,7 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDeltaFastPath(
Variables: variables,
}, start, end)
if err != nil {
return "", []any{}, err
return "", nil, err
}
}
if filterWhere != nil {
@@ -208,8 +213,11 @@ func (b *meterQueryStatementBuilder) buildTemporalAggDelta(
}
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -278,7 +286,10 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
}
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))
baseSb.From(fmt.Sprintf("%s.%s AS points", DBName, tbl))
@@ -315,25 +326,23 @@ func (b *meterQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
switch query.Aggregations[0].TimeAggregation {
case metrictypes.TimeAggregationRate:
rateExpr := fmt.Sprintf(telemetrymetrics.RateWithoutNegative, start, start)
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.RateTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
case metrictypes.TimeAggregationIncrease:
incExpr := fmt.Sprintf(telemetrymetrics.IncreaseWithoutNegative, start, start)
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", telemetrymetrics.IncreaseTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -348,7 +357,15 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
_ uint64,
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any) {
) (string, []any, error) {
if query.Aggregations[0].SpaceAggregation.IsZero() {
return "", nil, errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`]",
)
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("ts")
@@ -365,5 +382,5 @@ func (b *meterQueryStatementBuilder) buildSpatialAggregationCTE(
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}
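The pipeline builder above collects CTE fragments and their args, then stitches them into a single WITH query. A minimal sketch of that assembly step, assuming a simple string join (helper names hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// combineCTEs joins pre-built CTE fragments into one statement and
// concatenates their arguments in the same order.
func combineCTEs(cteFragments []string, cteArgs [][]any, finalSelect string, finalArgs []any) (string, []any) {
	query := "WITH " + strings.Join(cteFragments, ", ") + " " + finalSelect
	args := []any{}
	for _, a := range cteArgs {
		args = append(args, a...)
	}
	args = append(args, finalArgs...)
	return query, args
}

func main() {
	q, args := combineCTEs(
		[]string{
			"__temporal_aggregation_cte AS (SELECT ts, per_series_value FROM points WHERE metric_name = ?)",
			"__spatial_aggregation_cte AS (SELECT ts, sum(per_series_value) AS value FROM __temporal_aggregation_cte GROUP BY ts)",
		},
		[][]any{{"signoz_calls_total"}, {}},
		"SELECT * FROM __spatial_aggregation_cte",
		nil,
	)
	fmt.Println(q)
	fmt.Println(args)
}
```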

View File

@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747785600000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747785600000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
},
expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
},
expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
},
expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
},
expectedErr: nil,

View File

@@ -3,6 +3,7 @@ package telemetrymeter
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
)
@@ -63,7 +64,7 @@ func AggregationColumnForSamplesTable(
temporality metrictypes.Temporality,
timeAggregation metrictypes.TimeAggregation,
tableHints *metrictypes.MetricTableHints,
) string {
) (string, error) {
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
var aggregationColumn string
switch temporality {
@@ -190,5 +191,13 @@ func AggregationColumnForSamplesTable(
}
}
return aggregationColumn
if aggregationColumn == "" {
return "", errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
)
}
return aggregationColumn, nil
}
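AggregationColumnForSamplesTable now reports an error instead of silently returning an empty column. A sketch of how a caller might handle that, using a simplified stand-in for the real helper (signature and names hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// aggregationColumn is a simplified stand-in: it maps a time aggregation to a
// ClickHouse expression and errors on unknown values, like the updated helper.
func aggregationColumn(timeAggregation string) (string, error) {
	switch timeAggregation {
	case "sum":
		return "sum(value)", nil
	case "avg":
		return "avg(value)", nil
	case "max":
		return "max(value)", nil
	default:
		return "", errors.New("invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]")
	}
}

func main() {
	if col, err := aggregationColumn("sum"); err == nil {
		fmt.Println("aggregation column:", col)
	}
	if _, err := aggregationColumn("p99"); err != nil {
		fmt.Println("rejected:", err) // callers now surface this instead of building a broken query
	}
}
```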

View File

@@ -29,13 +29,7 @@ func (c *conditionBuilder) conditionFor(
sb *sqlbuilder.SelectBuilder,
) (string, error) {
switch operator {
case qbtypes.FilterOperatorContains,
qbtypes.FilterOperatorNotContains,
qbtypes.FilterOperatorILike,
qbtypes.FilterOperatorNotILike,
qbtypes.FilterOperatorLike,
qbtypes.FilterOperatorNotLike:
if operator.IsStringSearchOperator() {
value = querybuilder.FormatValueForContains(value)
}
@@ -44,6 +38,18 @@ func (c *conditionBuilder) conditionFor(
return "", err
}
// TODO(srikanthccv): use the same data type collision handling when metrics schemas are updated
switch v := value.(type) {
case float64:
tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
case []any:
if len(v) > 0 && (operator == qbtypes.FilterOperatorBetween || operator == qbtypes.FilterOperatorNotBetween) {
if _, ok := v[0].(float64); ok {
tblFieldName = fmt.Sprintf("toFloat64OrNull(%s)", tblFieldName)
}
}
}
switch operator {
case qbtypes.FilterOperatorEqual:
return sb.E(tblFieldName, value), nil
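The condition builder above wraps the column in toFloat64OrNull when the filter value is numeric (or a numeric BETWEEN range), so string-typed attribute columns compare numerically. A rough sketch of that decision in isolation; column names and values are illustrative only:

```go
package main

import "fmt"

// coerceForNumericCompare mirrors the idea of the added switch: wrap the
// column when the value (or a numeric BETWEEN range) is a float64.
func coerceForNumericCompare(column string, value any, between bool) string {
	switch v := value.(type) {
	case float64:
		return fmt.Sprintf("toFloat64OrNull(%s)", column)
	case []any:
		if between && len(v) > 0 {
			if _, ok := v[0].(float64); ok {
				return fmt.Sprintf("toFloat64OrNull(%s)", column)
			}
		}
	}
	return column
}

func main() {
	fmt.Println(coerceForNumericCompare("attributes['http.status_code']", 500.0, false))
	// toFloat64OrNull(attributes['http.status_code'])
	fmt.Println(coerceForNumericCompare("attributes['http.route']", "/api", false))
	// attributes['http.route']
	fmt.Println(coerceForNumericCompare("attributes['duration_ms']", []any{100.0, 200.0}, true))
	// toFloat64OrNull(attributes['duration_ms'])
}
```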

View File

@@ -5,67 +5,27 @@ import (
"fmt"
"log/slog"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/huandu/go-sqlbuilder"
"golang.org/x/exp/slices"
)
const (
RateWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
IncreaseWithoutNegative = `If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value, ((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))`
RateTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))`
RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window), (%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, IF((%s - lagInFrame(%s, 1, 0) OVER rate_window) < 0, %s, ((%s - lagInFrame(%s, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window)) * (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(%d))) OVER rate_window))) AS per_series_value`
OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
IncreaseTmpl = `multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value, per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)`
RateWithInterpolation = `
CASE
WHEN row_number() OVER rate_window = 1 THEN
-- First row: try to interpolate using next value
CASE
WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
-- Assume linear growth to next point
(leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
(leadInFrame(ts, 1) OVER rate_window - ts)
ELSE
0 -- No next value either, can't interpolate
END
WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
-- Counter reset detected
per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window)
ELSE
-- Normal case: calculate rate
(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) /
(ts - lagInFrame(ts, 1) OVER rate_window)
END`
RateWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s / (ts - lagInFrame(ts, 1) OVER rate_window), (%s - lagInFrame(%s, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window))) AS per_series_value`
IncreaseWithInterpolation = `
CASE
WHEN row_number() OVER rate_window = 1 THEN
-- First row: try to interpolate using next value
CASE
WHEN leadInFrame(per_series_value, 1) OVER rate_window IS NOT NULL THEN
-- Calculate the interpolated increase for this interval
((leadInFrame(per_series_value, 1) OVER rate_window - per_series_value) /
(leadInFrame(ts, 1) OVER rate_window - ts)) *
(leadInFrame(ts, 1) OVER rate_window - ts)
ELSE
0 -- No next value either, can't interpolate
END
WHEN (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0 THEN
-- Counter reset detected: the increase is the current value
per_series_value
ELSE
-- Normal case: calculate increase
(per_series_value - lagInFrame(per_series_value, 1) OVER rate_window)
END`
IncreaseWithoutNegativeMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, multiIf(row_number() OVER rate_window = 1, nan, (%s - lagInFrame(%s, 1) OVER rate_window) < 0, %s, (%s - lagInFrame(%s, 1) OVER rate_window))) AS per_series_value`
OthersMultiTemporality = `IF(LOWER(temporality) LIKE LOWER('delta'), %s, %s) AS per_series_value`
)
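The new RateTmpl/IncreaseTmpl return NaN for the first sample of each series and fall back to the raw value on a counter reset. A small arithmetic sketch of those rules outside ClickHouse, assuming per-series (timestamp, value) pairs:

```go
package main

import (
	"fmt"
	"math"
)

type point struct {
	ts    float64 // unix seconds
	value float64 // cumulative counter value
}

// rate mirrors the template's rules: NaN for the first point, value/dt on a
// counter reset (negative delta), otherwise delta/dt.
func rate(prev, cur point, first bool) float64 {
	if first {
		return math.NaN()
	}
	dt := cur.ts - prev.ts
	delta := cur.value - prev.value
	if delta < 0 { // counter reset
		return cur.value / dt
	}
	return delta / dt
}

func main() {
	series := []point{{0, 100}, {60, 160}, {120, 10}} // reset between the 2nd and 3rd point
	for i, p := range series {
		if i == 0 {
			fmt.Println("rate:", rate(point{}, p, true)) // NaN
			continue
		}
		fmt.Println("rate:", rate(series[i-1], p, false)) // 1.0, then 10/60 after the reset
	}
}
```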
type MetricQueryStatementBuilder struct {
@@ -147,54 +107,6 @@ func (b *MetricQueryStatementBuilder) Build(
return b.buildPipelineStatement(ctx, start, end, query, keys, variables)
}
// Fastpath (no fingerprint grouping)
// canShortCircuitDelta returns true if we can use the optimized query
// for the given query
// This is used to avoid the group by fingerprint thus improving the performance
// for certain queries
// cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
// - rate = sum(value)/step, increase = sum(value) - sum of sums is same as sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
// - sum of sums is same as sum of all values
//
// 3. time aggregation = min and space aggregation = min
// - min of mins is same as min of all values
//
// 4. time aggregation = max and space aggregation = max
// - max of maxs is same as max of all values
//
// 5. special case exphist, there is no need for per series/fingerprint aggregation
// we can directly use the quantilesDDMerge function
//
// all of this is true only for delta metrics
func (b *MetricQueryStatementBuilder) CanShortCircuitDelta(q qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]) bool {
if q.Aggregations[0].Temporality != metrictypes.Delta {
return false
}
ta := q.Aggregations[0].TimeAggregation
sa := q.Aggregations[0].SpaceAggregation
if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
return true
}
if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
return true
}
if q.Aggregations[0].Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
return true
}
return false
}
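The doc comment above argues that, for delta metrics, summing per-series sums equals summing all raw samples, which is what lets the builder skip the per-fingerprint CTE. A tiny numeric check of that claim:

```go
package main

import "fmt"

func main() {
	// Delta samples for two series (fingerprints) in the same time bucket.
	seriesA := []float64{2, 3, 5}
	seriesB := []float64{1, 4}

	sum := func(xs []float64) float64 {
		var s float64
		for _, x := range xs {
			s += x
		}
		return s
	}

	// Per-series aggregation first, then spatial sum...
	perSeriesThenSpatial := sum([]float64{sum(seriesA), sum(seriesB)})
	// ...is identical to summing every sample directly.
	direct := sum(append(append([]float64{}, seriesA...), seriesB...))

	fmt.Println(perSeriesThenSpatial, direct) // 15 15
}
```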
func (b *MetricQueryStatementBuilder) buildPipelineStatement(
ctx context.Context,
start, end uint64,
@@ -256,10 +168,11 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
return nil, err
}
if b.CanShortCircuitDelta(query) {
if qbtypes.CanShortCircuitDelta(query.Aggregations[0]) {
// spatial_aggregation_cte directly for certain delta queries
frag, args := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs)
if frag != "" {
if frag, args, err := b.buildTemporalAggDeltaFastPath(start, end, query, timeSeriesCTE, timeSeriesCTEArgs); err != nil {
return nil, err
} else if frag != "" {
cteFragments = append(cteFragments, frag)
cteArgs = append(cteArgs, args)
}
@@ -273,8 +186,9 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
}
// spatial_aggregation_cte
frag, args := b.buildSpatialAggregationCTE(ctx, start, end, query, keys)
if frag != "" {
if frag, args, err := b.buildSpatialAggregationCTE(ctx, start, end, query, keys); err != nil {
return nil, err
} else if frag != "" {
cteFragments = append(cteFragments, frag)
cteArgs = append(cteArgs, args)
}
@@ -294,7 +208,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
timeSeriesCTE string,
timeSeriesCTEArgs []any,
) (string, []any) {
) (string, []any, error) {
stepSec := int64(query.StepInterval.Seconds())
sb := sqlbuilder.NewSelectBuilder()
@@ -307,11 +221,15 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggCol := AggregationColumnForSamplesTable(
aggCol, err := AggregationColumnForSamplesTable(
start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality,
query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints,
)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -334,7 +252,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDeltaFastPath(
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}
func (b *MetricQueryStatementBuilder) buildTimeSeriesCTE(
@@ -437,8 +355,12 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
// TODO(srikanthccv): should it be step interval or use [start_time_unix_nano](https://github.com/open-telemetry/opentelemetry-proto/blob/d3fb76d70deb0874692bd0ebe03148580d85f3bb/opentelemetry/proto/metrics/v1/metrics.proto#L400C11-L400C31)?
aggCol = fmt.Sprintf("%s/%d", aggCol, stepSec)
}
@@ -461,7 +383,7 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggDelta(
}
func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
ctx context.Context,
_ context.Context,
start, end uint64,
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
timeSeriesCTE string,
@@ -479,7 +401,10 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
baseSb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggCol := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggCol, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, query.Aggregations[0].Temporality, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
baseSb.SelectMore(fmt.Sprintf("%s AS per_series_value", aggCol))
tbl := WhichSamplesTableToUse(start, end, query.Aggregations[0].Type, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
@@ -496,36 +421,25 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
innerQuery, innerArgs := baseSb.BuildWithFlavor(sqlbuilder.ClickHouse, timeSeriesCTEArgs...)
// ! TODO (balanikaran) Get OrgID via function parameter instead of valuer.GenerateUUID()
interpolationEnabled := b.flagger.BooleanOrEmpty(ctx, flagger.FeatureInterpolationEnabled, featuretypes.NewFlaggerEvaluationContext(valuer.GenerateUUID()))
switch query.Aggregations[0].TimeAggregation {
case metrictypes.TimeAggregationRate:
rateExpr := fmt.Sprintf(RateWithoutNegative, start, start)
if interpolationEnabled {
rateExpr = RateWithInterpolation
}
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", rateExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", RateTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
case metrictypes.TimeAggregationIncrease:
incExpr := fmt.Sprintf(IncreaseWithoutNegative, start, start)
if interpolationEnabled {
incExpr = IncreaseWithInterpolation
}
wrapped := sqlbuilder.NewSelectBuilder()
wrapped.Select("ts")
for _, g := range query.GroupBy {
wrapped.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", incExpr))
wrapped.SelectMore(fmt.Sprintf("%s AS per_series_value", IncreaseTmpl))
wrapped.From(fmt.Sprintf("(%s) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)", innerQuery))
q, args := wrapped.BuildWithFlavor(sqlbuilder.ClickHouse, innerArgs...)
return fmt.Sprintf("__temporal_aggregation_cte AS (%s)", q), args, nil
@@ -534,7 +448,6 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggCumulativeOrUnspecified(
}
}
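RateTmpl and IncreaseTmpl are not shown in this diff; the expected queries in the tests below suggest the rate case is computed per series over a window partitioned by fingerprint: the first row of each series becomes NaN, a negative delta (counter reset) falls back to value/dt, and the normal case is delta/dt. A rough standalone Go sketch of that assumed per-series logic:

package main

import (
	"fmt"
	"math"
)

// cumulativeRate mirrors the assumed windowed rate expression: the first point
// of a series has no previous sample and yields NaN; a counter reset (negative
// delta) falls back to value/(ts-prevTs); otherwise the rate is delta/(ts-prevTs).
func cumulativeRate(ts []int64, values []float64) []float64 {
	rates := make([]float64, len(values))
	for i := range values {
		if i == 0 {
			rates[i] = math.NaN()
			continue
		}
		dt := float64(ts[i] - ts[i-1])
		delta := values[i] - values[i-1]
		if delta < 0 { // counter reset
			rates[i] = values[i] / dt
			continue
		}
		rates[i] = delta / dt
	}
	return rates
}

func main() {
	ts := []int64{0, 30, 60, 90}
	values := []float64{100, 160, 10, 40} // reset between 160 and 10
	fmt.Println(cumulativeRate(ts, values)) // [NaN 2 0.3333333333333333 1]
}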
// RateInterpolation is not enabled anywhere yet because of gaps in the cache-handling logic, so it has not been considered for the multi-temporality path
func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
_ context.Context,
start, end uint64,
@@ -553,18 +466,32 @@ func (b *MetricQueryStatementBuilder) buildTemporalAggForMultipleTemporalities(
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggForDeltaTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggForCumulativeTemporality := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
aggForDeltaTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Delta, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
aggForCumulativeTemporality, err := AggregationColumnForSamplesTable(start, end, query.Aggregations[0].Type, metrictypes.Cumulative, query.Aggregations[0].TimeAggregation, query.Aggregations[0].TableHints)
if err != nil {
return "", nil, err
}
if query.Aggregations[0].TimeAggregation == metrictypes.TimeAggregationRate {
aggForDeltaTemporality = fmt.Sprintf("%s/%d", aggForDeltaTemporality, stepSec)
}
switch query.Aggregations[0].TimeAggregation {
case metrictypes.TimeAggregationRate:
rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, aggForCumulativeTemporality, aggForCumulativeTemporality, start)
rateExpr := fmt.Sprintf(RateWithoutNegativeMultiTemporality,
aggForDeltaTemporality,
aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
aggForCumulativeTemporality, aggForCumulativeTemporality,
)
sb.SelectMore(rateExpr)
case metrictypes.TimeAggregationIncrease:
increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality, start, start)
increaseExpr := fmt.Sprintf(IncreaseWithoutNegativeMultiTemporality,
aggForDeltaTemporality,
aggForCumulativeTemporality, aggForCumulativeTemporality, aggForCumulativeTemporality,
aggForCumulativeTemporality, aggForCumulativeTemporality,
)
sb.SelectMore(increaseExpr)
default:
expr := fmt.Sprintf(OthersMultiTemporality, aggForDeltaTemporality, aggForCumulativeTemporality)
@@ -592,7 +519,14 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
_ uint64,
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
_ map[string][]*telemetrytypes.TelemetryFieldKey,
) (string, []any) {
) (string, []any, error) {
if query.Aggregations[0].SpaceAggregation.IsZero() {
return "", nil, errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`]",
)
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("ts")
@@ -609,7 +543,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
q, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args, nil
}
func (b *MetricQueryStatementBuilder) BuildFinalSelect(
@@ -641,9 +575,7 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
quantile,
))
sb.From("__spatial_aggregation_cte")
for _, g := range query.GroupBy {
sb.GroupBy(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
@@ -659,6 +591,8 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
sb.Where(rewrittenExpr)
}
}
sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.OrderBy("ts")
q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil

View File

@@ -50,7 +50,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -83,7 +83,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947360000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -116,7 +116,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
@@ -148,7 +148,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
@@ -181,7 +181,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,
@@ -210,7 +210,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, If((per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1, 0) OVER rate_window) / (ts - lagInFrame(ts, 1, toDateTime(fromUnixTimestamp64Milli(1747947390000))) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,

View File

@@ -3,6 +3,7 @@ package telemetrymetrics
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
)
@@ -168,7 +169,7 @@ func AggregationColumnForSamplesTable(
temporality metrictypes.Temporality,
timeAggregation metrictypes.TimeAggregation,
tableHints *metrictypes.MetricTableHints,
) string {
) (string, error) {
tableName := WhichSamplesTableToUse(start, end, metricType, timeAggregation, tableHints)
var aggregationColumn string
switch temporality {
@@ -298,5 +299,12 @@ func AggregationColumnForSamplesTable(
}
}
}
return aggregationColumn
if aggregationColumn == "" {
return "", errors.Newf(
errors.TypeInvalidInput,
errors.CodeInvalidInput,
"invalid time aggregation, should be one of the following: [`latest`, `sum`, `avg`, `min`, `max`, `count`, `rate`, `increase`]",
)
}
return aggregationColumn, nil
}

View File

@@ -35,13 +35,7 @@ func (c *conditionBuilder) conditionFor(
sb *sqlbuilder.SelectBuilder,
) (string, error) {
switch operator {
case qbtypes.FilterOperatorContains,
qbtypes.FilterOperatorNotContains,
qbtypes.FilterOperatorILike,
qbtypes.FilterOperatorNotILike,
qbtypes.FilterOperatorLike,
qbtypes.FilterOperatorNotLike:
if operator.IsStringSearchOperator() {
value = querybuilder.FormatValueForContains(value)
}

View File

@@ -41,22 +41,6 @@ func NewOrganization(displayName string) *Organization {
}
}
func NewOrganizationWithName(name string) *Organization {
id := valuer.GenerateUUID()
return &Organization{
Identifiable: Identifiable{
ID: id,
},
TimeAuditable: TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Name: name,
DisplayName: name,
Key: NewOrganizationKey(id),
}
}
func NewOrganizationKey(orgID valuer.UUID) uint32 {
hasher := fnv.New32a()
@@ -90,7 +74,6 @@ type TTLSetting struct {
type OrganizationStore interface {
Create(context.Context, *Organization) error
Get(context.Context, valuer.UUID) (*Organization, error)
GetByName(context.Context, string) (*Organization, error)
GetAll(context.Context) ([]*Organization, error)
ListByKeyRange(context.Context, uint32, uint32) ([]*Organization, error)
Update(context.Context, *Organization) error

View File

@@ -152,7 +152,9 @@ func (f FilterOperator) IsStringSearchOperator() bool {
FilterOperatorILike,
FilterOperatorNotILike,
FilterOperatorLike,
FilterOperatorNotLike:
FilterOperatorNotLike,
FilterOperatorRegexp,
FilterOperatorNotRegexp:
return true
default:
return false

View File

@@ -3,6 +3,7 @@ package querybuildertypesv5
import (
"fmt"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
@@ -174,3 +175,54 @@ func (q *QueryBuilderQuery[T]) Normalize() {
}
}
// Fastpath (no fingerprint grouping)
// CanShortCircuitDelta returns true if we can use the optimized query
// for the given query.
// It avoids the group by fingerprint, which improves performance
// for certain queries.
// Cases where we can short circuit:
// 1. time aggregation = (rate|increase) and space aggregation = sum
//    - rate = sum(value)/step, increase = sum(value); a sum of sums is the same as the sum of all values
//
// 2. time aggregation = sum and space aggregation = sum
//    - a sum of sums is the same as the sum of all values
//
// 3. time aggregation = min and space aggregation = min
//    - a min of mins is the same as the min of all values
//
// 4. time aggregation = max and space aggregation = max
//    - a max of maxes is the same as the max of all values
//
// 5. special case: exponential histograms need no per-series/fingerprint aggregation;
//    we can use the quantilesDDMerge function directly
//
// all of this is true only for delta metrics
func CanShortCircuitDelta(metricAgg MetricAggregation) bool {
if metricAgg.Temporality != metrictypes.Delta {
return false
}
ta := metricAgg.TimeAggregation
sa := metricAgg.SpaceAggregation
if (ta == metrictypes.TimeAggregationRate || ta == metrictypes.TimeAggregationIncrease) &&
sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationSum && sa == metrictypes.SpaceAggregationSum {
return true
}
if ta == metrictypes.TimeAggregationMin && sa == metrictypes.SpaceAggregationMin {
return true
}
if ta == metrictypes.TimeAggregationMax && sa == metrictypes.SpaceAggregationMax {
return true
}
if metricAgg.Type == metrictypes.ExpHistogramType && sa.IsPercentile() {
return true
}
return false
}
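The justification in the comment above (a sum of sums equals the sum of all values, and likewise for min and max) is what makes skipping the per-fingerprint grouping safe. A tiny standalone Go check of that property with made-up per-series samples:

package main

import "fmt"

func main() {
	series := map[string][]float64{
		"fingerprint-1": {1, 2, 3},
		"fingerprint-2": {10, 20},
	}

	// Two-level aggregation: sum per series, then sum across series.
	perSeriesThenTotal := 0.0
	for _, values := range series {
		s := 0.0
		for _, v := range values {
			s += v
		}
		perSeriesThenTotal += s
	}

	// Single-level aggregation: sum every sample directly.
	direct := 0.0
	for _, values := range series {
		for _, v := range values {
			direct += v
		}
	}

	fmt.Println(perSeriesThenTotal == direct) // true: sum of sums == sum of all values
}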

View File

@@ -12,6 +12,7 @@ import (
var (
ErrColumnNotFound = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "field not found")
ErrBetweenValues = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
ErrBetweenValuesType = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two numeric values")
ErrInValues = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
ErrUnsupportedOperator = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "unsupported operator")
)

View File

@@ -75,7 +75,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
case QueryTypeFormula:
var spec QueryBuilderFormula
// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderFormula
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderFormula
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
return wrapUnmarshalError(err, "invalid formula spec: %v", err)
}
@@ -83,7 +83,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
case QueryTypeJoin:
var spec QueryBuilderJoin
// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderJoin
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderJoin
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
return wrapUnmarshalError(err, "invalid join spec: %v", err)
}
@@ -98,7 +98,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
case QueryTypePromQL:
var spec PromQuery
// TODO: use json.Unmarshal here after implementing custom unmarshaler for PromQuery
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for PromQuery
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
return wrapUnmarshalError(err, "invalid PromQL spec: %v", err)
}
@@ -106,7 +106,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
case QueryTypeClickHouseSQL:
var spec ClickHouseQuery
// TODO: use json.Unmarshal here after implementing custom unmarshaler for ClickHouseQuery
// TODO(srikanthccv): use json.Unmarshal here after implementing custom unmarshaler for ClickHouseQuery
if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
return wrapUnmarshalError(err, "invalid ClickHouse SQL spec: %v", err)
}
@@ -439,7 +439,7 @@ func (r *QueryRangeRequest) GetQueriesSupportingZeroDefault() map[string]bool {
expr = strings.ToLower(expr)
// only pure additive/counting operations should default to zero,
// while statistical/analytical operations should show gaps when there's no data to analyze.
// TODO: use newExprVisitor for getting the function used in the expression
// TODO(srikanthccv): use newExprVisitor for getting the function used in the expression
if strings.HasPrefix(expr, "count(") ||
strings.HasPrefix(expr, "count_distinct(") ||
strings.HasPrefix(expr, "sum(") ||

View File

@@ -21,3 +21,12 @@ var (
// []Bucket (struct{Lower,Upper,Count float64}), example: histogram
RequestTypeDistribution = RequestType{valuer.NewString("distribution")}
)
// IsAggregation returns true for request types that produce aggregated results
// (time_series, scalar, distribution). For these types, fields like groupBy,
// having, aggregations, and orderBy (with aggregation key validation) are meaningful.
// For non-aggregation types (raw, raw_stream, trace), those fields are ignored
// and don't need to be validated.
func (r RequestType) IsAggregation() bool {
return r == RequestTypeTimeSeries || r == RequestTypeScalar || r == RequestTypeDistribution
}
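A standalone Go sketch restating the classification above with hypothetical string stand-ins for the real RequestType values; it shows the guard pattern the validators below use, where aggregation-only checks are skipped for non-aggregation request types:

package main

import "fmt"

// isAggregation mirrors the rule above with plain strings (hypothetical stand-ins
// for the real RequestType values): time_series, scalar and distribution produce
// aggregated results; raw, raw_stream and trace do not.
func isAggregation(requestType string) bool {
	switch requestType {
	case "time_series", "scalar", "distribution":
		return true
	default:
		return false
	}
}

// validateGroupByExample shows the guard pattern: aggregation-only checks are
// skipped entirely for non-aggregation request types.
func validateGroupByExample(requestType string, groupByKeys []string) error {
	if !isAggregation(requestType) {
		return nil
	}
	for i, key := range groupByKeys {
		if key == "" {
			return fmt.Errorf("invalid empty key name for group by at index %d", i)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateGroupByExample("raw", []string{""}))         // <nil> (skipped)
	fmt.Println(validateGroupByExample("time_series", []string{""})) // error
}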

View File

@@ -10,54 +10,78 @@ import (
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
// queryName returns the name from any query envelope spec type.
func (e QueryEnvelope) queryName() string {
switch spec := e.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Name
case QueryBuilderQuery[LogAggregation]:
return spec.Name
case QueryBuilderQuery[MetricAggregation]:
return spec.Name
case QueryBuilderFormula:
return spec.Name
case QueryBuilderTraceOperator:
return spec.Name
case QueryBuilderJoin:
return spec.Name
case PromQuery:
return spec.Name
case ClickHouseQuery:
return spec.Name
}
return ""
}
// isDisabled returns the disabled status from any query envelope spec type.
func (e QueryEnvelope) isDisabled() bool {
switch spec := e.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Disabled
case QueryBuilderQuery[LogAggregation]:
return spec.Disabled
case QueryBuilderQuery[MetricAggregation]:
return spec.Disabled
case QueryBuilderFormula:
return spec.Disabled
case QueryBuilderTraceOperator:
return spec.Disabled
case QueryBuilderJoin:
return spec.Disabled
case PromQuery:
return spec.Disabled
case ClickHouseQuery:
return spec.Disabled
}
return false
}
// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
name := envelope.queryName()
var typeLabel string
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
if spec.Name != "" {
return fmt.Sprintf("query '%s'", spec.Name)
}
return fmt.Sprintf("trace query at position %d", index+1)
case QueryBuilderQuery[LogAggregation]:
if spec.Name != "" {
return fmt.Sprintf("query '%s'", spec.Name)
}
return fmt.Sprintf("log query at position %d", index+1)
case QueryBuilderQuery[MetricAggregation]:
if spec.Name != "" {
return fmt.Sprintf("query '%s'", spec.Name)
}
return fmt.Sprintf("metric query at position %d", index+1)
}
typeLabel = "query"
case QueryTypeFormula:
if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && spec.Name != "" {
return fmt.Sprintf("formula '%s'", spec.Name)
}
return fmt.Sprintf("formula at position %d", index+1)
typeLabel = "formula"
case QueryTypeTraceOperator:
if spec, ok := envelope.Spec.(QueryBuilderTraceOperator); ok && spec.Name != "" {
return fmt.Sprintf("trace operator '%s'", spec.Name)
}
return fmt.Sprintf("trace operator at position %d", index+1)
typeLabel = "trace operator"
case QueryTypeJoin:
if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && spec.Name != "" {
return fmt.Sprintf("join '%s'", spec.Name)
}
return fmt.Sprintf("join at position %d", index+1)
typeLabel = "join"
case QueryTypePromQL:
if spec, ok := envelope.Spec.(PromQuery); ok && spec.Name != "" {
return fmt.Sprintf("PromQL query '%s'", spec.Name)
}
return fmt.Sprintf("PromQL query at position %d", index+1)
typeLabel = "PromQL query"
case QueryTypeClickHouseSQL:
if spec, ok := envelope.Spec.(ClickHouseQuery); ok && spec.Name != "" {
return fmt.Sprintf("ClickHouse query '%s'", spec.Name)
}
return fmt.Sprintf("ClickHouse query at position %d", index+1)
typeLabel = "ClickHouse query"
default:
typeLabel = "query"
}
return fmt.Sprintf("query at position %d", index+1)
if name != "" {
return fmt.Sprintf("%s '%s'", typeLabel, name)
}
return fmt.Sprintf("%s at position %d", typeLabel, index+1)
}
const (
@@ -72,11 +96,12 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
return err
}
// Validate aggregations only for non-raw request types
if requestType != RequestTypeRaw && requestType != RequestTypeRawStream && requestType != RequestTypeTrace {
if err := q.validateAggregations(); err != nil {
return err
}
if err := q.validateAggregations(requestType); err != nil {
return err
}
if err := q.validateGroupBy(requestType); err != nil {
return err
}
// Validate limit and pagination
@@ -94,32 +119,23 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
return err
}
if requestType != RequestTypeRaw && requestType != RequestTypeTrace && len(q.Aggregations) > 0 {
if err := q.validateOrderByForAggregation(); err != nil {
return err
}
} else {
if err := q.validateOrderBy(); err != nil {
return err
}
if err := q.validateOrderBy(requestType); err != nil {
return err
}
if requestType != RequestTypeRaw && requestType != RequestTypeTrace {
if err := q.validateHaving(); err != nil {
return err
}
}
if requestType == RequestTypeRaw {
if err := q.validateSelectFields(); err != nil {
return err
}
if err := q.validateSelectFields(requestType); err != nil {
return err
}
return nil
}
func (q *QueryBuilderQuery[T]) validateSelectFields() error {
func (q *QueryBuilderQuery[T]) validateSelectFields(requestType RequestType) error {
// selectFields don't apply to aggregation queries, skip validation
if requestType.IsAggregation() {
return nil
}
// isRoot and isEntryPoint are returned by the Metadata API, so if someone sends them, we have to reject the request.
for _, v := range q.SelectFields {
if v.Name == "isRoot" || v.Name == "isEntryPoint" {
@@ -132,6 +148,21 @@ func (q *QueryBuilderQuery[T]) validateSelectFields() error {
return nil
}
func (q *QueryBuilderQuery[T]) validateGroupBy(requestType RequestType) error {
// groupBy doesn't apply to non-aggregation queries, skip validation
if !requestType.IsAggregation() {
return nil
}
for idx, item := range q.GroupBy {
if item.TelemetryFieldKey.Name == "" {
return errors.NewInvalidInputf(
errors.CodeInvalidInput, "invalid empty key name for group by at index %d", idx,
)
}
}
return nil
}
func (q *QueryBuilderQuery[T]) validateSignal() error {
// Signal validation is handled during unmarshaling in req.go
// Valid signals are: metrics, traces, logs
@@ -152,7 +183,12 @@ func (q *QueryBuilderQuery[T]) validateSignal() error {
}
}
func (q *QueryBuilderQuery[T]) validateAggregations() error {
func (q *QueryBuilderQuery[T]) validateAggregations(requestType RequestType) error {
// aggregations don't apply to non-aggregation queries, skip validation
if !requestType.IsAggregation() {
return nil
}
// At least one aggregation required for non-disabled queries
if len(q.Aggregations) == 0 && !q.Disabled {
return errors.NewInvalidInputf(
@@ -179,14 +215,6 @@ func (q *QueryBuilderQuery[T]) validateAggregations() error {
aggId,
)
}
// Validate metric-specific aggregations
if err := validateMetricAggregation(v); err != nil {
aggId := fmt.Sprintf("aggregation #%d", i+1)
if q.Name != "" {
aggId = fmt.Sprintf("aggregation #%d in query '%s'", i+1, q.Name)
}
return wrapValidationError(err, aggId, "invalid metric %s: %s")
}
case TraceAggregation:
if v.Expression == "" {
aggId := fmt.Sprintf("aggregation #%d", i+1)
@@ -301,7 +329,7 @@ func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
return nil
}
func (q *QueryBuilderQuery[T]) validateOrderBy() error {
func (q *QueryBuilderQuery[T]) validateOrderBy(requestType RequestType) error {
for i, order := range q.Order {
// Direction validation is handled by the OrderDirection type
if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
@@ -319,6 +347,12 @@ func (q *QueryBuilderQuery[T]) validateOrderBy() error {
)
}
}
// aggregation-specific order key validation only applies to aggregation queries
if requestType.IsAggregation() {
return q.validateOrderByForAggregation()
}
return nil
}
@@ -328,10 +362,6 @@ func (q *QueryBuilderQuery[T]) validateOrderBy() error {
// 2. Aggregation expressions or aliases
// 3. Aggregation index (0, 1, 2, etc.)
func (q *QueryBuilderQuery[T]) validateOrderByForAggregation() error {
// First validate basic order by constraints
if err := q.validateOrderBy(); err != nil {
return err
}
validOrderKeys := make(map[string]bool)
@@ -401,22 +431,6 @@ func (q *QueryBuilderQuery[T]) validateOrderByForAggregation() error {
return nil
}
func (q *QueryBuilderQuery[T]) validateHaving() error {
if q.Having == nil || q.Having.Expression == "" {
return nil
}
// ensure that having is only used with aggregations
if len(q.Aggregations) == 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"having clause can only be used with aggregation queries. Use `filter.expression` instead",
)
}
return nil
}
// Validate validates the entire query range request
func (r *QueryRangeRequest) Validate() error {
// Validate time range
@@ -456,236 +470,20 @@ func (r *QueryRangeRequest) Validate() error {
// validateAllQueriesNotDisabled validates that at least one query in the composite query is enabled
func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
allDisabled := true
for _, envelope := range r.CompositeQuery.Queries {
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
if !spec.Disabled {
allDisabled = false
}
case QueryBuilderQuery[LogAggregation]:
if !spec.Disabled {
allDisabled = false
}
case QueryBuilderQuery[MetricAggregation]:
if !spec.Disabled {
allDisabled = false
}
}
case QueryTypeFormula:
if spec, ok := envelope.Spec.(QueryBuilderFormula); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypeTraceOperator:
if spec, ok := envelope.Spec.(QueryBuilderTraceOperator); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypeJoin:
if spec, ok := envelope.Spec.(QueryBuilderJoin); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypePromQL:
if spec, ok := envelope.Spec.(PromQuery); ok && !spec.Disabled {
allDisabled = false
}
case QueryTypeClickHouseSQL:
if spec, ok := envelope.Spec.(ClickHouseQuery); ok && !spec.Disabled {
allDisabled = false
}
}
// Early exit if we find at least one enabled query
if !allDisabled {
break
if !envelope.isDisabled() {
return nil
}
}
if allDisabled {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"all queries are disabled - at least one query must be enabled",
)
}
return nil
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"all queries are disabled - at least one query must be enabled",
)
}
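A standalone Go sketch of the simplified early-return flow, using a local stand-in struct instead of the real QueryEnvelope and isDisabled helper:

package main

import (
	"errors"
	"fmt"
)

type query struct {
	name     string
	disabled bool
}

// atLeastOneEnabled mirrors the simplified check: return nil as soon as one
// enabled query is found, otherwise report that all queries are disabled.
func atLeastOneEnabled(queries []query) error {
	for _, q := range queries {
		if !q.disabled {
			return nil
		}
	}
	return errors.New("all queries are disabled - at least one query must be enabled")
}

func main() {
	fmt.Println(atLeastOneEnabled([]query{{"A", true}, {"B", false}})) // <nil>
	fmt.Println(atLeastOneEnabled([]query{{"A", true}}))               // error
}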
func (r *QueryRangeRequest) validateCompositeQuery() error {
// Validate queries in composite query
if len(r.CompositeQuery.Queries) == 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"at least one query is required",
)
}
// Track query names for uniqueness (only for non-formula queries)
queryNames := make(map[string]bool)
// Validate each query based on its type
for i, envelope := range r.CompositeQuery.Queries {
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
// Validate based on the concrete type
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
if err := spec.Validate(r.RequestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for non-formula context
if spec.Name != "" {
if queryNames[spec.Name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
spec.Name,
)
}
queryNames[spec.Name] = true
}
case QueryBuilderQuery[LogAggregation]:
if err := spec.Validate(r.RequestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for non-formula context
if spec.Name != "" {
if queryNames[spec.Name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
spec.Name,
)
}
queryNames[spec.Name] = true
}
case QueryBuilderQuery[MetricAggregation]:
if err := spec.Validate(r.RequestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for non-formula context
if spec.Name != "" {
if queryNames[spec.Name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
spec.Name,
)
}
queryNames[spec.Name] = true
}
default:
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"unknown spec type for %s",
queryId,
)
}
case QueryTypeFormula:
// Formula validation is handled separately
spec, ok := envelope.Spec.(QueryBuilderFormula)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Expression == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"expression is required for %s",
queryId,
)
}
case QueryTypeJoin:
// Join validation is handled separately
_, ok := envelope.Spec.(QueryBuilderJoin)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
case QueryTypeTraceOperator:
spec, ok := envelope.Spec.(QueryBuilderTraceOperator)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Expression == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"expression is required for %s",
queryId,
)
}
case QueryTypePromQL:
// PromQL validation is handled separately
spec, ok := envelope.Spec.(PromQuery)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Query == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"query expression is required for %s",
queryId,
)
}
case QueryTypeClickHouseSQL:
// ClickHouse SQL validation is handled separately
spec, ok := envelope.Spec.(ClickHouseQuery)
if !ok {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid spec for %s",
queryId,
)
}
if spec.Query == "" {
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"query expression is required for %s",
queryId,
)
}
default:
queryId := getQueryIdentifier(envelope, i)
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"unknown query type '%s' for %s",
envelope.Type,
queryId,
).WithAdditional(
"Valid query types are: builder_query, builder_formula, builder_join, promql, clickhouse_sql, trace_operator",
)
}
}
return nil
return r.CompositeQuery.Validate(r.RequestType)
}
// Validate performs validation on CompositeQuery
@@ -697,12 +495,29 @@ func (c *CompositeQuery) Validate(requestType RequestType) error {
)
}
// Validate each query
// Track query names for uniqueness (only for builder queries)
queryNames := make(map[string]bool)
for i, envelope := range c.Queries {
if err := validateQueryEnvelope(envelope, requestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for builder queries
if envelope.Type == QueryTypeBuilder || envelope.Type == QueryTypeSubQuery {
name := envelope.queryName()
if name != "" {
if queryNames[name] {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"duplicate query name '%s'",
name,
)
}
queryNames[name] = true
}
}
}
return nil
@@ -803,85 +618,3 @@ func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) erro
)
}
}
// validateMetricAggregation validates metric-specific aggregation parameters
func validateMetricAggregation(agg MetricAggregation) error {
// we can't decide anything here without known temporality
if agg.Temporality == metrictypes.Unknown {
return nil
}
// Validate that rate/increase are only used with appropriate temporalities
if agg.TimeAggregation == metrictypes.TimeAggregationRate || agg.TimeAggregation == metrictypes.TimeAggregationIncrease {
// For gauge metrics (Unspecified temporality), rate/increase doesn't make sense
if agg.Temporality == metrictypes.Unspecified {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"rate/increase aggregation cannot be used with gauge metrics (unspecified temporality)",
)
}
}
// Validate percentile aggregations are only used with histogram types
if agg.SpaceAggregation.IsPercentile() {
if agg.Type != metrictypes.HistogramType && agg.Type != metrictypes.ExpHistogramType && agg.Type != metrictypes.SummaryType {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"percentile aggregation can only be used with histogram or summary metric types",
)
}
}
// Validate time aggregation values
validTimeAggregations := []metrictypes.TimeAggregation{
metrictypes.TimeAggregationUnspecified,
metrictypes.TimeAggregationLatest,
metrictypes.TimeAggregationSum,
metrictypes.TimeAggregationAvg,
metrictypes.TimeAggregationMin,
metrictypes.TimeAggregationMax,
metrictypes.TimeAggregationCount,
metrictypes.TimeAggregationCountDistinct,
metrictypes.TimeAggregationRate,
metrictypes.TimeAggregationIncrease,
}
validTimeAgg := slices.Contains(validTimeAggregations, agg.TimeAggregation)
if !validTimeAgg {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time aggregation: %s",
agg.TimeAggregation.StringValue(),
).WithAdditional(
"Valid time aggregations: latest, sum, avg, min, max, count, count_distinct, rate, increase",
)
}
// Validate space aggregation values
validSpaceAggregations := []metrictypes.SpaceAggregation{
metrictypes.SpaceAggregationUnspecified,
metrictypes.SpaceAggregationSum,
metrictypes.SpaceAggregationAvg,
metrictypes.SpaceAggregationMin,
metrictypes.SpaceAggregationMax,
metrictypes.SpaceAggregationCount,
metrictypes.SpaceAggregationPercentile50,
metrictypes.SpaceAggregationPercentile75,
metrictypes.SpaceAggregationPercentile90,
metrictypes.SpaceAggregationPercentile95,
metrictypes.SpaceAggregationPercentile99,
}
validSpaceAgg := slices.Contains(validSpaceAggregations, agg.SpaceAggregation)
if !validSpaceAgg {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid space aggregation: %s",
agg.SpaceAggregation.StringValue(),
).WithAdditional(
"Valid space aggregations: sum, avg, min, max, count, p50, p75, p90, p95, p99",
)
}
return nil
}

View File

@@ -333,6 +333,617 @@ func TestQueryRangeRequest_ValidateAllQueriesNotDisabled(t *testing.T) {
}
}
func TestQueryRangeRequest_ValidateCompositeQuery(t *testing.T) {
tests := []struct {
name string
request QueryRangeRequest
wantErr bool
errMsg string
}{
{
name: "empty composite query should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{},
},
},
wantErr: true,
errMsg: "at least one query is required",
},
{
name: "duplicate builder query names should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[LogAggregation]{
Name: "A",
Disabled: true,
Signal: telemetrytypes.SignalLogs,
},
},
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[TraceAggregation]{
Name: "A",
Disabled: true,
Signal: telemetrytypes.SignalTraces,
},
},
},
},
},
wantErr: true,
errMsg: "duplicate query name 'A'",
},
{
name: "duplicate names across log and metric builder queries should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[LogAggregation]{
Name: "X",
Disabled: true,
Signal: telemetrytypes.SignalLogs,
},
},
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[MetricAggregation]{
Name: "X",
Disabled: true,
Signal: telemetrytypes.SignalMetrics,
},
},
},
},
},
wantErr: true,
errMsg: "duplicate query name 'X'",
},
{
name: "same name on formula and builder should not conflict",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
Aggregations: []LogAggregation{
{Expression: "count()"},
},
},
},
{
Type: QueryTypeFormula,
Spec: QueryBuilderFormula{
Name: "A",
Expression: "A + 1",
},
},
},
},
},
wantErr: false,
},
{
name: "formula with empty expression should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeFormula,
Spec: QueryBuilderFormula{
Name: "F1",
Expression: "",
},
},
},
},
},
wantErr: true,
errMsg: "expression is required",
},
{
name: "promql with empty query should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypePromQL,
Spec: PromQuery{
Name: "P1",
Query: "",
},
},
},
},
},
wantErr: true,
errMsg: "PromQL query is required",
},
{
name: "clickhouse with empty query should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeClickHouseSQL,
Spec: ClickHouseQuery{
Name: "CH1",
Query: "",
},
},
},
},
},
wantErr: true,
errMsg: "ClickHouse SQL query is required",
},
{
name: "trace operator with empty expression should return error",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeTraceOperator,
Spec: QueryBuilderTraceOperator{
Name: "TO1",
Expression: "",
},
},
},
},
},
wantErr: true,
errMsg: "expression is required",
},
{
name: "valid promql query should pass",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypePromQL,
Spec: PromQuery{
Name: "P1",
Query: "up",
},
},
},
},
},
wantErr: false,
},
{
name: "valid clickhouse query should pass",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeClickHouseSQL,
Spec: ClickHouseQuery{
Name: "CH1",
Query: "SELECT count() FROM logs",
},
},
},
},
},
wantErr: false,
},
{
name: "valid mixed queries with unique builder names should pass",
request: QueryRangeRequest{
Start: 1640995200000,
End: 1640998800000,
RequestType: RequestTypeTimeSeries,
CompositeQuery: CompositeQuery{
Queries: []QueryEnvelope{
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
Aggregations: []LogAggregation{
{Expression: "count()"},
},
},
},
{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[TraceAggregation]{
Name: "B",
Signal: telemetrytypes.SignalTraces,
Aggregations: []TraceAggregation{
{Expression: "count()"},
},
},
},
{
Type: QueryTypePromQL,
Spec: PromQuery{
Name: "C",
Query: "up",
},
},
},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.request.Validate()
if tt.wantErr {
if err == nil {
t.Errorf("Validate() expected error but got none")
return
}
if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) {
t.Errorf("Validate() error = %v, want to contain %v", err.Error(), tt.errMsg)
}
} else {
if err != nil {
t.Errorf("Validate() unexpected error = %v", err)
}
}
})
}
}
func TestValidateQueryEnvelope(t *testing.T) {
tests := []struct {
name string
envelope QueryEnvelope
requestType RequestType
wantErr bool
errMsg string
}{
{
name: "valid builder query with trace aggregation",
envelope: QueryEnvelope{
Type: QueryTypeBuilder,
Spec: QueryBuilderQuery[TraceAggregation]{
Name: "A",
Signal: telemetrytypes.SignalTraces,
Aggregations: []TraceAggregation{
{Expression: "count()"},
},
},
},
requestType: RequestTypeTimeSeries,
wantErr: false,
},
{
name: "valid formula with expression",
envelope: QueryEnvelope{
Type: QueryTypeFormula,
Spec: QueryBuilderFormula{
Name: "F1",
Expression: "A + B",
},
},
requestType: RequestTypeTimeSeries,
wantErr: false,
},
{
name: "formula with empty expression should fail",
envelope: QueryEnvelope{
Type: QueryTypeFormula,
Spec: QueryBuilderFormula{
Name: "F1",
Expression: "",
},
},
requestType: RequestTypeTimeSeries,
wantErr: true,
errMsg: "expression is required",
},
{
name: "valid join spec",
envelope: QueryEnvelope{
Type: QueryTypeJoin,
Spec: QueryBuilderJoin{
Name: "J1",
},
},
requestType: RequestTypeTimeSeries,
wantErr: false,
},
{
name: "valid trace operator",
envelope: QueryEnvelope{
Type: QueryTypeTraceOperator,
Spec: QueryBuilderTraceOperator{
Name: "TO1",
Expression: "count()",
},
},
requestType: RequestTypeTimeSeries,
wantErr: false,
},
{
name: "trace operator with empty expression should fail",
envelope: QueryEnvelope{
Type: QueryTypeTraceOperator,
Spec: QueryBuilderTraceOperator{
Name: "TO1",
Expression: "",
},
},
requestType: RequestTypeTimeSeries,
wantErr: true,
errMsg: "expression is required",
},
{
name: "promql with empty query should fail",
envelope: QueryEnvelope{
Type: QueryTypePromQL,
Spec: PromQuery{
Name: "P1",
Query: "",
},
},
requestType: RequestTypeTimeSeries,
wantErr: true,
errMsg: "PromQL query is required",
},
{
name: "clickhouse with empty query should fail",
envelope: QueryEnvelope{
Type: QueryTypeClickHouseSQL,
Spec: ClickHouseQuery{
Name: "CH1",
Query: "",
},
},
requestType: RequestTypeTimeSeries,
wantErr: true,
errMsg: "ClickHouse SQL query is required",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateQueryEnvelope(tt.envelope, tt.requestType)
if tt.wantErr {
if err == nil {
t.Errorf("validateQueryEnvelope() expected error but got none")
return
}
if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) {
t.Errorf("validateQueryEnvelope() error = %v, want to contain %v", err.Error(), tt.errMsg)
}
} else {
if err != nil {
t.Errorf("validateQueryEnvelope() unexpected error = %v", err)
}
}
})
}
}
func TestQueryEnvelope_Helpers(t *testing.T) {
t.Run("queryName", func(t *testing.T) {
tests := []struct {
name string
envelope QueryEnvelope
want string
}{
{
name: "trace builder query",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[TraceAggregation]{Name: "A"}},
want: "A",
},
{
name: "log builder query",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Name: "B"}},
want: "B",
},
{
name: "metric builder query",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[MetricAggregation]{Name: "C"}},
want: "C",
},
{
name: "formula",
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{Name: "F1"}},
want: "F1",
},
{
name: "promql",
envelope: QueryEnvelope{Type: QueryTypePromQL, Spec: PromQuery{Name: "P1"}},
want: "P1",
},
{
name: "clickhouse",
envelope: QueryEnvelope{Type: QueryTypeClickHouseSQL, Spec: ClickHouseQuery{Name: "CH1"}},
want: "CH1",
},
{
name: "trace operator",
envelope: QueryEnvelope{Type: QueryTypeTraceOperator, Spec: QueryBuilderTraceOperator{Name: "TO1"}},
want: "TO1",
},
{
name: "join",
envelope: QueryEnvelope{Type: QueryTypeJoin, Spec: QueryBuilderJoin{Name: "J1"}},
want: "J1",
},
{
name: "empty name",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{}},
want: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.envelope.queryName()
if got != tt.want {
t.Errorf("queryName() = %q, want %q", got, tt.want)
}
})
}
})
t.Run("isDisabled", func(t *testing.T) {
tests := []struct {
name string
envelope QueryEnvelope
want bool
}{
{
name: "enabled builder query",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Disabled: false}},
want: false,
},
{
name: "disabled builder query",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Disabled: true}},
want: true,
},
{
name: "disabled formula",
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{Disabled: true}},
want: true,
},
{
name: "enabled promql",
envelope: QueryEnvelope{Type: QueryTypePromQL, Spec: PromQuery{Disabled: false}},
want: false,
},
{
name: "disabled clickhouse",
envelope: QueryEnvelope{Type: QueryTypeClickHouseSQL, Spec: ClickHouseQuery{Disabled: true}},
want: true,
},
{
name: "disabled trace operator",
envelope: QueryEnvelope{Type: QueryTypeTraceOperator, Spec: QueryBuilderTraceOperator{Disabled: true}},
want: true,
},
{
name: "disabled join",
envelope: QueryEnvelope{Type: QueryTypeJoin, Spec: QueryBuilderJoin{Disabled: true}},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.envelope.isDisabled()
if got != tt.want {
t.Errorf("isDisabled() = %v, want %v", got, tt.want)
}
})
}
})
}
func TestGetQueryIdentifier(t *testing.T) {
tests := []struct {
name string
envelope QueryEnvelope
index int
want string
}{
{
name: "builder query with name",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{Name: "A"}},
index: 0,
want: "query 'A'",
},
{
name: "builder query without name",
envelope: QueryEnvelope{Type: QueryTypeBuilder, Spec: QueryBuilderQuery[LogAggregation]{}},
index: 2,
want: "query at position 3",
},
{
name: "formula with name",
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{Name: "F1"}},
index: 0,
want: "formula 'F1'",
},
{
name: "formula without name",
envelope: QueryEnvelope{Type: QueryTypeFormula, Spec: QueryBuilderFormula{}},
index: 1,
want: "formula at position 2",
},
{
name: "promql with name",
envelope: QueryEnvelope{Type: QueryTypePromQL, Spec: PromQuery{Name: "P1"}},
index: 0,
want: "PromQL query 'P1'",
},
{
name: "clickhouse with name",
envelope: QueryEnvelope{Type: QueryTypeClickHouseSQL, Spec: ClickHouseQuery{Name: "CH1"}},
index: 0,
want: "ClickHouse query 'CH1'",
},
{
name: "trace operator with name",
envelope: QueryEnvelope{Type: QueryTypeTraceOperator, Spec: QueryBuilderTraceOperator{Name: "TO1"}},
index: 0,
want: "trace operator 'TO1'",
},
{
name: "join without name",
envelope: QueryEnvelope{Type: QueryTypeJoin, Spec: QueryBuilderJoin{}},
index: 0,
want: "join at position 1",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := getQueryIdentifier(tt.envelope, tt.index)
if got != tt.want {
t.Errorf("getQueryIdentifier() = %q, want %q", got, tt.want)
}
})
}
}
func TestQueryRangeRequest_ValidateOrderByForAggregation(t *testing.T) {
tests := []struct {
name string
@@ -512,4 +1123,139 @@ func TestQueryRangeRequest_ValidateOrderByForAggregation(t *testing.T) {
}
})
}
}
}
func TestRequestType_IsAggregation(t *testing.T) {
tests := []struct {
name string
requestType RequestType
want bool
}{
{"time_series is aggregation", RequestTypeTimeSeries, true},
{"scalar is aggregation", RequestTypeScalar, true},
{"distribution is aggregation", RequestTypeDistribution, true},
{"raw is not aggregation", RequestTypeRaw, false},
{"raw_stream is not aggregation", RequestTypeRawStream, false},
{"trace is not aggregation", RequestTypeTrace, false},
{"unknown is not aggregation", RequestTypeUnknown, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.requestType.IsAggregation()
if got != tt.want {
t.Errorf("IsAggregation() = %v, want %v", got, tt.want)
}
})
}
}
func TestNonAggregationFieldsSkipped(t *testing.T) {
// Fields that only apply to aggregation queries (groupBy, having, aggregations)
// should be silently skipped for non-aggregation request types.
t.Run("groupBy ignored for raw request type", func(t *testing.T) {
query := QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
GroupBy: []GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
}
err := query.Validate(RequestTypeRaw)
if err != nil {
t.Errorf("expected no error for groupBy with raw request type, got: %v", err)
}
})
t.Run("groupBy validated for timeseries request type", func(t *testing.T) {
query := QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
Aggregations: []LogAggregation{
{Expression: "count()"},
},
GroupBy: []GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: ""}},
},
}
err := query.Validate(RequestTypeTimeSeries)
if err == nil {
t.Errorf("expected error for empty groupBy key with timeseries request type")
}
})
t.Run("having ignored for raw request type", func(t *testing.T) {
query := QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
Having: &Having{Expression: "count() > 10"},
}
err := query.Validate(RequestTypeRaw)
if err != nil {
t.Errorf("expected no error for having with raw request type, got: %v", err)
}
})
t.Run("having ignored for trace request type", func(t *testing.T) {
query := QueryBuilderQuery[TraceAggregation]{
Name: "A",
Signal: telemetrytypes.SignalTraces,
Having: &Having{Expression: "count() > 10"},
}
err := query.Validate(RequestTypeTrace)
if err != nil {
t.Errorf("expected no error for having with trace request type, got: %v", err)
}
})
t.Run("aggregations ignored for raw request type", func(t *testing.T) {
query := QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
Aggregations: []LogAggregation{
{Expression: "count()"},
},
}
err := query.Validate(RequestTypeRaw)
if err != nil {
t.Errorf("expected no error for aggregations with raw request type, got: %v", err)
}
})
t.Run("aggregations ignored for raw_stream request type", func(t *testing.T) {
query := QueryBuilderQuery[LogAggregation]{
Name: "A",
Signal: telemetrytypes.SignalLogs,
Aggregations: []LogAggregation{
{Expression: "count()"},
},
}
err := query.Validate(RequestTypeRawStream)
if err != nil {
t.Errorf("expected no error for aggregations with raw_stream request type, got: %v", err)
}
})
t.Run("selectFields validated for raw but not timeseries", func(t *testing.T) {
query := QueryBuilderQuery[TraceAggregation]{
Name: "A",
Signal: telemetrytypes.SignalTraces,
Aggregations: []TraceAggregation{
{Expression: "count()"},
},
SelectFields: []telemetrytypes.TelemetryFieldKey{
{Name: "isRoot"},
},
}
// Should error for raw (selectFields are validated)
err := query.Validate(RequestTypeRaw)
if err == nil {
t.Errorf("expected error for isRoot in selectFields with raw request type")
}
// Should pass for timeseries (selectFields skipped)
err = query.Validate(RequestTypeTimeSeries)
if err != nil {
t.Errorf("expected no error for isRoot in selectFields with timeseries request type, got: %v", err)
}
})
}
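
The behaviour these sub-tests pin down can be summarised with a short sketch. This is an illustration only, not the code in validation.go: it assumes the package's QueryBuilderQuery, GroupByKey and RequestType.IsAggregation() seen in the tests above, plus the standard library errors package.

// Hypothetical sketch, not the actual validation.go implementation.
func validateForRequestType(q QueryBuilderQuery[LogAggregation], rt RequestType) error {
	if !rt.IsAggregation() {
		// raw, raw_stream and trace requests: groupBy, having and
		// aggregations are silently ignored; selectFields and the like still apply.
		return nil
	}
	// Aggregating requests (time_series, scalar, distribution) do validate
	// the aggregation-only fields, e.g. every groupBy key needs a name.
	for _, g := range q.GroupBy {
		if g.TelemetryFieldKey.Name == "" {
			return errors.New("groupBy key name is required")
		}
	}
	return nil
}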

View File

@@ -78,18 +78,18 @@ func (f *TelemetryFieldKey) ArrayParentSelectors() []*FieldKeySelector {
func (f TelemetryFieldKey) String() string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("name=%s", f.Name))
fmt.Fprintf(&sb, "name=%s", f.Name)
if f.FieldContext != FieldContextUnspecified {
sb.WriteString(fmt.Sprintf(",context=%s", f.FieldContext.String))
fmt.Fprintf(&sb, ",context=%s", f.FieldContext.String)
}
if f.FieldDataType != FieldDataTypeUnspecified {
sb.WriteString(fmt.Sprintf(",datatype=%s", f.FieldDataType.StringValue()))
fmt.Fprintf(&sb, ",datatype=%s", f.FieldDataType.StringValue())
}
if f.Materialized {
sb.WriteString(",materialized=true")
}
if f.JSONDataType != nil {
sb.WriteString(fmt.Sprintf(",jsondatatype=%s", f.JSONDataType.StringValue()))
fmt.Fprintf(&sb, ",jsondatatype=%s", f.JSONDataType.StringValue())
}
if len(f.Indexes) > 0 {
sb.WriteString(",indexes=[")
@@ -97,7 +97,7 @@ func (f TelemetryFieldKey) String() string {
if i > 0 {
sb.WriteString("; ")
}
sb.WriteString(fmt.Sprintf("{type=%s, columnExpr=%s, indexExpr=%s}", index.Type.StringValue(), index.ColumnExpression, index.IndexExpression))
fmt.Fprintf(&sb, "{type=%s, columnExpr=%s, indexExpr=%s}", index.Type.StringValue(), index.ColumnExpression, index.IndexExpression)
}
sb.WriteString("]")
}
@@ -108,6 +108,17 @@ func (f TelemetryFieldKey) Text() string {
return TelemetryFieldKeyToText(&f)
}
// OverrideMetadataFrom copies the resolved metadata fields from src into f.
// This is used when adjusting user-provided keys to match known field definitions.
func (f *TelemetryFieldKey) OverrideMetadataFrom(src *TelemetryFieldKey) {
f.FieldContext = src.FieldContext
f.FieldDataType = src.FieldDataType
f.JSONDataType = src.JSONDataType
f.Indexes = src.Indexes
f.Materialized = src.Materialized
f.JSONPlan = src.JSONPlan
}
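
A brief usage sketch for the new helper (hypothetical caller; only fields already shown in this file are used, and the lookup that produces the resolved key is out of scope here):

// Hypothetical usage, not code from this package.
userKey := &TelemetryFieldKey{Name: "service.name"} // as supplied in a query
resolved := &TelemetryFieldKey{
	Name:         "service.name",
	Materialized: true,
	// FieldContext, FieldDataType etc. would come from the field-metadata
	// lookup; concrete values are omitted to avoid guessing constant names.
}
userKey.OverrideMetadataFrom(resolved)
// userKey now carries resolved's context, data type, indexes and
// materialization flag, while keeping its own Name.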
func (f *TelemetryFieldKey) Equal(key *TelemetryFieldKey) bool {
return f.Name == key.Name &&
f.FieldContext == key.FieldContext &&
@@ -225,11 +236,19 @@ func TelemetryFieldKeyToText(key *TelemetryFieldKey) string {
}
func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {
return fmt.Sprintf("`%s_%s_%s`", key.FieldContext.String, fieldDataTypes[key.FieldDataType.StringValue()].StringValue(), strings.ReplaceAll(key.Name, ".", "$$"))
return fmt.Sprintf("`%s_%s_%s`",
key.FieldContext.String,
fieldDataTypes[key.FieldDataType.StringValue()].StringValue(),
strings.ReplaceAll(key.Name, ".", "$$"),
)
}
func FieldKeyToMaterializedColumnNameForExists(key *TelemetryFieldKey) string {
return fmt.Sprintf("`%s_%s_%s_exists`", key.FieldContext.String, fieldDataTypes[key.FieldDataType.StringValue()].StringValue(), strings.ReplaceAll(key.Name, ".", "$$"))
return fmt.Sprintf("`%s_%s_%s_exists`",
key.FieldContext.String,
fieldDataTypes[key.FieldDataType.StringValue()].StringValue(),
strings.ReplaceAll(key.Name, ".", "$$"),
)
}
type TelemetryFieldValues struct {

View File

@@ -11,16 +11,15 @@ import (
)
var (
ErrCodeUserNotFound = errors.MustNewCode("user_not_found")
ErrCodeAmbiguousUser = errors.MustNewCode("ambiguous_user")
ErrUserAlreadyExists = errors.MustNewCode("user_already_exists")
ErrPasswordAlreadyExists = errors.MustNewCode("password_already_exists")
ErrResetPasswordTokenAlreadyExists = errors.MustNewCode("reset_password_token_already_exists")
ErrPasswordNotFound = errors.MustNewCode("password_not_found")
ErrResetPasswordTokenNotFound = errors.MustNewCode("reset_password_token_not_found")
ErrAPIKeyAlreadyExists = errors.MustNewCode("api_key_already_exists")
ErrAPIKeyNotFound = errors.MustNewCode("api_key_not_found")
ErrCodeRootUserOperationUnsupported = errors.MustNewCode("root_user_operation_unsupported")
ErrCodeUserNotFound = errors.MustNewCode("user_not_found")
ErrCodeAmbiguousUser = errors.MustNewCode("ambiguous_user")
ErrUserAlreadyExists = errors.MustNewCode("user_already_exists")
ErrPasswordAlreadyExists = errors.MustNewCode("password_already_exists")
ErrResetPasswordTokenAlreadyExists = errors.MustNewCode("reset_password_token_already_exists")
ErrPasswordNotFound = errors.MustNewCode("password_not_found")
ErrResetPasswordTokenNotFound = errors.MustNewCode("reset_password_token_not_found")
ErrAPIKeyAlreadyExists = errors.MustNewCode("api_key_already_exists")
ErrAPIKeyNotFound = errors.MustNewCode("api_key_not_found")
)
type GettableUser = User
@@ -30,10 +29,9 @@ type User struct {
Identifiable
DisplayName string `bun:"display_name" json:"displayName"`
Email valuer.Email `bun:"email" json:"email"`
Role Role `bun:"role" json:"role"`
OrgID valuer.UUID `bun:"org_id" json:"orgId"`
IsRoot bool `bun:"is_root" json:"isRoot"`
Email valuer.Email `bun:"email,type:text" json:"email"`
Role Role `bun:"role,type:text" json:"role"`
OrgID valuer.UUID `bun:"org_id,type:text" json:"orgId"`
TimeAuditable
}
@@ -66,7 +64,6 @@ func NewUser(displayName string, email valuer.Email, role Role, orgID valuer.UUI
Email: email,
Role: role,
OrgID: orgID,
IsRoot: false,
TimeAuditable: TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
@@ -74,65 +71,6 @@ func NewUser(displayName string, email valuer.Email, role Role, orgID valuer.UUI
}, nil
}
func NewRootUser(displayName string, email valuer.Email, orgID valuer.UUID) (*User, error) {
if email.IsZero() {
return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "email is required")
}
if orgID.IsZero() {
return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgID is required")
}
return &User{
Identifiable: Identifiable{
ID: valuer.GenerateUUID(),
},
DisplayName: displayName,
Email: email,
Role: RoleAdmin,
OrgID: orgID,
IsRoot: true,
TimeAuditable: TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
}, nil
}
// Update applies mutable fields from the input to the user. Immutable fields
// (email, is_root, org_id, id) are preserved. Only non-zero input fields are applied.
func (u *User) Update(displayName string, role Role) {
if displayName != "" {
u.DisplayName = displayName
}
if role != "" {
u.Role = role
}
u.UpdatedAt = time.Now()
}
// PromoteToRoot promotes the user to a root user with admin role.
func (u *User) PromoteToRoot() {
u.IsRoot = true
u.Role = RoleAdmin
u.UpdatedAt = time.Now()
}
// UpdateEmail updates the email of the user.
func (u *User) UpdateEmail(email valuer.Email) {
u.Email = email
u.UpdatedAt = time.Now()
}
// ErrIfRoot returns an error if the user is a root user. The caller should
// enrich the error with the specific operation using errors.WithAdditionalf.
func (u *User) ErrIfRoot() error {
if u.IsRoot {
return errors.New(errors.TypeUnsupported, ErrCodeRootUserOperationUnsupported, "this operation is not supported for the root user")
}
return nil
}
func NewTraitsFromUser(user *User) map[string]any {
return map[string]any{
"name": user.DisplayName,
@@ -195,7 +133,7 @@ type UserStore interface {
// List users by email and org ids.
ListUsersByEmailAndOrgIDs(ctx context.Context, email valuer.Email, orgIDs []valuer.UUID) ([]*User, error)
UpdateUser(ctx context.Context, orgID valuer.UUID, user *User) error
UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *User) (*User, error)
DeleteUser(ctx context.Context, orgID string, id string) error
// Creates a password.
@@ -218,9 +156,6 @@ type UserStore interface {
CountByOrgID(ctx context.Context, orgID valuer.UUID) (int64, error)
// Get root user by org.
GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*User, error)
// Transaction
RunInTx(ctx context.Context, cb func(ctx context.Context) error) error
}

View File

@@ -14,10 +14,10 @@ from fixtures.alertutils import (
from fixtures.logger import setup_logger
from fixtures.utils import get_testdata_file_path
# test cases for match type and compare operators have wait time of 30 seconds to verify the alert expectation.
# we've poistioned the alert data to fire the alert on first eval of rule manager, the eval frequency
# for most alert rules are set of 15s so considering this delay plus some delay from alert manager's
# group_wait and group_interval, even in worst case most alerts should be triggered in about 30 seconds
# Alert test cases use a 30-second wait time to verify expected alert firing.
# Alert data is set up to trigger on the first rule manager evaluation.
# With a 15-second eval frequency for most rules, plus alertmanager's
# group_wait and group_interval delays, alerts should fire well within 30 seconds.
TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
types.AlertTestCase(
name="test_threshold_above_at_least_once",
@@ -25,6 +25,7 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
alert_data=[
types.AlertData(
type="metrics",
# active requests dummy data
data_path="alerts/test_scenarios/threshold_above_at_least_once/alert_data.jsonl",
),
],
@@ -115,30 +116,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last, pylint: disable=W0511
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed
# types.AlertTestCase(
# name="test_threshold_above_last",
# rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_above_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_above_last",
rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_above_last",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_below_at_least_once",
rule_path="alerts/test_scenarios/threshold_below_at_least_once/rule.json",
@@ -189,6 +188,7 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
alert_data=[
types.AlertData(
type="metrics",
# one rate ~5 + rest 0.01 so it remains in total below 10
data_path="alerts/test_scenarios/threshold_below_in_total/alert_data.jsonl",
),
],
@@ -227,30 +227,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last,
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
# types.AlertTestCase(
# name="test_threshold_below_last",
# rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_below_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_below_last",
rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_below_last",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_equal_to_at_least_once",
rule_path="alerts/test_scenarios/threshold_equal_to_at_least_once/rule.json",
@@ -339,30 +337,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last,
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
# types.AlertTestCase(
# name="test_threshold_equal_to_last",
# rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_equal_to_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_equal_to_last",
rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_equal_to_last",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_not_equal_to_at_least_once",
rule_path="alerts/test_scenarios/threshold_not_equal_to_at_least_once/rule.json",
@@ -451,30 +447,28 @@ TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last,
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
# types.AlertTestCase(
# name="test_threshold_not_equal_to_last",
# rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_not_equal_to_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_not_equal_to_last",
rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_not_equal_to_last",
"threshold.name": "critical",
}
),
],
),
),
]
# test cases unit conversion

File diff suppressed because it is too large

View File

@@ -54,17 +54,17 @@ def test_rate_with_steady_values_and_reset(
data = response.json()
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
assert len(result_values) >= 59
assert len(result_values) >= 58
# the counter reset happened at 31st minute
assert (
result_values[30]["value"] == 0.0167
result_values[29]["value"] == 0.0167
) # i.e 2/120 i.e 29th to 31st minute changes
assert (
result_values[31]["value"] == 0.133
result_values[30]["value"] == 0.133
) # i.e 10/60 i.e 31st to 32nd minute changes
count_of_steady_rate = sum(1 for v in result_values if v["value"] == 0.0833)
assert (
count_of_steady_rate >= 56
count_of_steady_rate >= 55
) # 59 - (1 reset + 1 high rate + 1 at the beginning)
# All rates should be non-negative (stale periods = 0 rate)
for v in result_values:
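
As a quick sanity check on two of the expected numbers asserted above (taken straight from the inline comments; the actual bucketing and counter-reset handling live in the ClickHouse query under test and are not reproduced here):

// Worked arithmetic only; not the query implementation.
package main

import "fmt"

func main() {
	fmt.Printf("%.4f\n", 2.0/120.0) // 0.0167: an increase of 2 spread over the 2-minute window around the reset
	fmt.Printf("%.4f\n", 5.0/60.0)  // 0.0833: the steady increase of 5 per minute, expressed per second
}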

View File

@@ -72,16 +72,17 @@ def test_with_steady_values_and_reset(
data = response.json()
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
assert len(result_values) >= 59
assert len(result_values) >= 58
# the counter reset happened at 31st minute
assert result_values[30]["value"] == expected_value_at_31st_minute
assert result_values[31]["value"] == expected_value_at_32nd_minute
    # we skip the rate value for the first data point, which has no previous value
assert result_values[29]["value"] == expected_value_at_31st_minute
assert result_values[30]["value"] == expected_value_at_32nd_minute
assert (
result_values[39]["value"] == steady_value
) # 39th minute is when cumulative shifts to delta
result_values[38]["value"] == steady_value
) # 38th minute is when cumulative shifts to delta
count_of_steady_rate = sum(1 for v in result_values if v["value"] == steady_value)
assert (
count_of_steady_rate >= 56
count_of_steady_rate >= 55
) # 59 - (1 reset + 1 high rate + 1 at the beginning)
# All rates should be non-negative (stale periods = 0 rate)
for v in result_values:
@@ -316,12 +317,12 @@ def test_for_service_with_switch(
data = response.json()
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
assert len(result_values) >= 60
assert result_values[30]["value"] == expected_value_at_30th_minute # 0.183
assert result_values[31]["value"] == expected_value_at_31st_minute # 0.183
assert result_values[38]["value"] == value_at_switch # 0.25
assert len(result_values) >= 59
assert result_values[29]["value"] == expected_value_at_30th_minute # 0.183
assert result_values[30]["value"] == expected_value_at_31st_minute # 0.183
assert result_values[37]["value"] == value_at_switch # 0.25
assert (
result_values[39]["value"] == value_at_switch # 0.25
result_values[38]["value"] == value_at_switch # 0.25
) # 39th minute is when cumulative shifts to delta
# All rates should be non-negative (stale periods = 0 rate)
for v in result_values:

View File

@@ -1,12 +1,12 @@
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":19,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":38,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":25,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

View File

@@ -25,7 +25,7 @@
"type": "clickhouse_sql",
"spec": {
"name": "A",
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= {{.start_timestamp_ms}} \n AND unix_milli < {{.end_timestamp_ms}} \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n avg(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= $start_timestamp_ms \n AND unix_milli < $end_timestamp_ms \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n sum(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
}
}
]

View File

@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":46,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":76,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":12,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":23,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

View File

@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":30,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":50,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":55,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":70,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":45,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":60,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":65,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":34,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_mb_threshold_equal_to_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":75,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

View File

@@ -1,12 +1,12 @@
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":524288,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":1048576,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":1572864,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":2097152,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":3770016,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":5642880,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":10515744,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":11038632,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":11561520,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":12084408,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":12607296,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_unit_conversion_bytes_to_mb","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":13130184,"temporality":"Cumulative","type_":"Sum","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}