Mirror of https://github.com/SigNoz/signoz.git, synced 2026-03-20 11:40:27 +00:00

Compare commits: feat/markd...nv/6204 (40 commits)
4d649a8e0c
d1c6a3040b
36e8d2e6c6
1e4191c7bf
f106f57097
edffb359c1
22cc7509fc
dee8e53b6b
285a31afa6
8cfa9dc038
b986b0a5f4
5bafdeb373
24b72084ac
2db83b453d
28e0f2f7ad
cd458f0205
ee2916e6c6
1c1d069263
7dc46db2e3
bfcd423a45
323b1163e5
673379a46c
37f490c705
324e34092e
2a2c365950
14065d39a6
bf2133f1ab
d7d907f687
76b4549504
968a5089ff
c082bc3d76
59e0dcc865
89840189ef
b64a07db02
38d971b3c9
f8b266ce05
20f7562cbc
29713964ce
afb252b4f9
c808b4d759
.github/workflows/goci.yaml (vendored): 10 changed lines
@@ -102,13 +102,3 @@ jobs:
        run: |
          go run cmd/enterprise/*.go generate openapi
          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in openapi spec. Run go run cmd/enterprise/*.go generate openapi locally and commit."; exit 1)
-      - name: node-install
-        uses: actions/setup-node@v5
-        with:
-          node-version: "22"
-      - name: install-frontend
-        run: cd frontend && yarn install
-      - name: generate-api-clients
-        run: |
-          cd frontend && yarn generate:api
-          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)
.github/workflows/jsci.yaml (vendored): 51 changed lines
@@ -52,16 +52,16 @@ jobs:
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
-  md-languages:
+  languages:
    if: |
      github.event_name == 'merge_group' ||
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
-      - name: checkout
+      - name: self-checkout
        uses: actions/checkout@v4
-      - name: validate md languages
+      - name: run
        run: bash frontend/scripts/validate-md-languages.sh
  authz:
    if: |
@@ -70,44 +70,55 @@ jobs:
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
-      - name: Checkout code
+      - name: self-checkout
        uses: actions/checkout@v5

-      - name: Set up Node.js
+      - name: node-install
        uses: actions/setup-node@v5
        with:
          node-version: "22"

-      - name: Install frontend dependencies
+      - name: deps-install
        working-directory: ./frontend
        run: |
          yarn install

-      - name: Install uv
+      - name: uv-install
        uses: astral-sh/setup-uv@v5

-      - name: Install Python dependencies
+      - name: uv-deps
        working-directory: ./tests/integration
        run: |
          uv sync

-      - name: Start test environment
+      - name: setup-test
        run: |
          make py-test-setup

-      - name: Generate permissions.type.ts
+      - name: generate
        working-directory: ./frontend
        run: |
          yarn generate:permissions-type

-      - name: Teardown test environment
+      - name: teardown-test
        if: always()
        run: |
          make py-test-teardown

-      - name: Check for changes
+      - name: validate
        run: |
          if ! git diff --exit-code frontend/src/hooks/useAuthZ/permissions.type.ts; then
            echo "::error::frontend/src/hooks/useAuthZ/permissions.type.ts is out of date. Please run the generator locally and commit the changes: npm run generate:permissions-type (from the frontend directory)"
            exit 1
          fi
+  openapi:
+    if: |
+      github.event_name == 'merge_group' ||
+      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
+    runs-on: ubuntu-latest
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - name: node-install
+        uses: actions/setup-node@v5
+        with:
+          node-version: "22"
+      - name: install-frontend
+        run: cd frontend && yarn install
+      - name: generate-api-clients
+        run: |
+          cd frontend && yarn generate:api
+          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)
@@ -308,6 +308,9 @@ user:
      allow_self: true
      # The duration within which a user can reset their password.
      max_token_lifetime: 6h
+    invite:
+      # The duration within which a user can accept their invite.
+      max_token_lifetime: 48h
  root:
    # Whether to enable the root user. When enabled, a root user is provisioned
    # on startup using the email and password below. The root user cannot be
@@ -190,7 +190,7 @@ services:
    #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
-    image: signoz/signoz:v0.115.0
+    image: signoz/signoz:v0.116.0
    ports:
      - "8080:8080" # signoz port
      # - "6060:6060" # pprof port

@@ -117,7 +117,7 @@ services:
    #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
-    image: signoz/signoz:v0.115.0
+    image: signoz/signoz:v0.116.0
    ports:
      - "8080:8080" # signoz port
    volumes:

@@ -181,7 +181,7 @@ services:
    #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.115.0}
+    image: signoz/signoz:${VERSION:-v0.116.0}
    container_name: signoz
    ports:
      - "8080:8080" # signoz port

@@ -109,7 +109,7 @@ services:
    #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.115.0}
+    image: signoz/signoz:${VERSION:-v0.116.0}
    container_name: signoz
    ports:
      - "8080:8080" # signoz port
@@ -220,6 +220,13 @@ components:
      - additions
      - deletions
      type: object
+    AuthtypesPatchableRole:
+      properties:
+        description:
+          type: string
+      required:
+      - description
+      type: object
    AuthtypesPostableAuthDomain:
      properties:
        config:

@@ -236,6 +243,15 @@ components:
        password:
          type: string
      type: object
+    AuthtypesPostableRole:
+      properties:
+        description:
+          type: string
+        name:
+          type: string
+      required:
+      - name
+      type: object
    AuthtypesPostableRotateToken:
      properties:
        refreshToken:

@@ -251,6 +267,31 @@ components:
      - name
      - type
      type: object
+    AuthtypesRole:
+      properties:
+        createdAt:
+          format: date-time
+          type: string
+        description:
+          type: string
+        id:
+          type: string
+        name:
+          type: string
+        orgId:
+          type: string
+        type:
+          type: string
+        updatedAt:
+          format: date-time
+          type: string
+      required:
+      - id
+      - name
+      - description
+      - type
+      - orgId
+      type: object
    AuthtypesRoleMapping:
      properties:
        defaultRole:

@@ -1722,47 +1763,6 @@ components:
      - status
      - error
      type: object
-    RoletypesPatchableRole:
-      properties:
-        description:
-          type: string
-      required:
-      - description
-      type: object
-    RoletypesPostableRole:
-      properties:
-        description:
-          type: string
-        name:
-          type: string
-      required:
-      - name
-      type: object
-    RoletypesRole:
-      properties:
-        createdAt:
-          format: date-time
-          type: string
-        description:
-          type: string
-        id:
-          type: string
-        name:
-          type: string
-        orgId:
-          type: string
-        type:
-          type: string
-        updatedAt:
-          format: date-time
-          type: string
-      required:
-      - id
-      - name
-      - description
-      - type
-      - orgId
-      type: object
    ServiceaccounttypesFactorAPIKey:
      properties:
        createdAt:

@@ -4234,7 +4234,7 @@ paths:
            properties:
              data:
                items:
-                  $ref: '#/components/schemas/RoletypesRole'
+                  $ref: '#/components/schemas/AuthtypesRole'
                type: array
              status:
                type: string

@@ -4277,7 +4277,7 @@ paths:
        content:
          application/json:
            schema:
-              $ref: '#/components/schemas/RoletypesPostableRole'
+              $ref: '#/components/schemas/AuthtypesPostableRole'
      responses:
        "201":
          content:

@@ -4422,7 +4422,7 @@ paths:
            schema:
              properties:
                data:
-                  $ref: '#/components/schemas/RoletypesRole'
+                  $ref: '#/components/schemas/AuthtypesRole'
                status:
                  type: string
              required:

@@ -4470,7 +4470,7 @@ paths:
        content:
          application/json:
            schema:
-              $ref: '#/components/schemas/RoletypesPatchableRole'
+              $ref: '#/components/schemas/AuthtypesPatchableRole'
      responses:
        "204":
          content:
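As a worked example of the new request shape: AuthtypesPostableRole above requires name and allows an optional description. A minimal sketch marshaling such a payload from an ad-hoc Go struct; the struct is illustrative, not a type defined or generated by the repository:

package main

import (
	"encoding/json"
	"fmt"
)

// postableRole mirrors the AuthtypesPostableRole schema above; it is an
// illustrative stand-in, not a type from the SigNoz codebase.
type postableRole struct {
	Name        string `json:"name"`                  // required by the schema
	Description string `json:"description,omitempty"` // optional
}

func main() {
	body, err := json.Marshal(postableRole{
		Name:        "support-readonly",
		Description: "read-only role for the support team",
	})
	if err != nil {
		panic(err)
	}
	// Suitable as the body of POST /api/v1/roles per the paths hunks above.
	fmt.Println(string(body))
}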
@@ -13,7 +13,6 @@ import (
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	openfgav1 "github.com/openfga/api/proto/openfga/v1"
	openfgapkgtransformer "github.com/openfga/language/pkg/go/transformer"

@@ -23,7 +22,7 @@ type provider struct {
	pkgAuthzService authz.AuthZ
	openfgaServer   *openfgaserver.Server
	licensing       licensing.Licensing
-	store           roletypes.Store
+	store           authtypes.RoleStore
	registry        []authz.RegisterTypeable
}

@@ -82,23 +81,23 @@ func (provider *provider) Write(ctx context.Context, additions []*openfgav1.Tupl
	return provider.openfgaServer.Write(ctx, additions, deletions)
}

-func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*roletypes.Role, error) {
+func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*authtypes.Role, error) {
	return provider.pkgAuthzService.Get(ctx, orgID, id)
}

-func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*roletypes.Role, error) {
+func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.Role, error) {
	return provider.pkgAuthzService.GetByOrgIDAndName(ctx, orgID, name)
}

-func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.Role, error) {
+func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.Role, error) {
	return provider.pkgAuthzService.List(ctx, orgID)
}

-func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*roletypes.Role, error) {
+func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.Role, error) {
	return provider.pkgAuthzService.ListByOrgIDAndNames(ctx, orgID, names)
}

-func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*roletypes.Role, error) {
+func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.Role, error) {
	return provider.pkgAuthzService.ListByOrgIDAndIDs(ctx, orgID, ids)
}

@@ -114,7 +113,7 @@ func (provider *provider) Revoke(ctx context.Context, orgID valuer.UUID, names [
	return provider.pkgAuthzService.Revoke(ctx, orgID, names, subject)
}

-func (provider *provider) CreateManagedRoles(ctx context.Context, orgID valuer.UUID, managedRoles []*roletypes.Role) error {
+func (provider *provider) CreateManagedRoles(ctx context.Context, orgID valuer.UUID, managedRoles []*authtypes.Role) error {
	return provider.pkgAuthzService.CreateManagedRoles(ctx, orgID, managedRoles)
}

@@ -136,16 +135,16 @@ func (provider *provider) CreateManagedUserRoleTransactions(ctx context.Context,
	return provider.Write(ctx, tuples, nil)
}

-func (provider *provider) Create(ctx context.Context, orgID valuer.UUID, role *roletypes.Role) error {
+func (provider *provider) Create(ctx context.Context, orgID valuer.UUID, role *authtypes.Role) error {
	_, err := provider.licensing.GetActive(ctx, orgID)
	if err != nil {
		return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

-	return provider.store.Create(ctx, roletypes.NewStorableRoleFromRole(role))
+	return provider.store.Create(ctx, authtypes.NewStorableRoleFromRole(role))
}

-func (provider *provider) GetOrCreate(ctx context.Context, orgID valuer.UUID, role *roletypes.Role) (*roletypes.Role, error) {
+func (provider *provider) GetOrCreate(ctx context.Context, orgID valuer.UUID, role *authtypes.Role) (*authtypes.Role, error) {
	_, err := provider.licensing.GetActive(ctx, orgID)
	if err != nil {
		return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())

@@ -159,10 +158,10 @@ func (provider *provider) GetOrCreate(ctx context.Context, orgID valuer.UUID, ro
	}

	if existingRole != nil {
-		return roletypes.NewRoleFromStorableRole(existingRole), nil
+		return authtypes.NewRoleFromStorableRole(existingRole), nil
	}

-	err = provider.store.Create(ctx, roletypes.NewStorableRoleFromRole(role))
+	err = provider.store.Create(ctx, authtypes.NewStorableRoleFromRole(role))
	if err != nil {
		return nil, err
	}

@@ -217,13 +216,13 @@ func (provider *provider) GetObjects(ctx context.Context, orgID valuer.UUID, id
	return objects, nil
}

-func (provider *provider) Patch(ctx context.Context, orgID valuer.UUID, role *roletypes.Role) error {
+func (provider *provider) Patch(ctx context.Context, orgID valuer.UUID, role *authtypes.Role) error {
	_, err := provider.licensing.GetActive(ctx, orgID)
	if err != nil {
		return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

-	return provider.store.Update(ctx, orgID, roletypes.NewStorableRoleFromRole(role))
+	return provider.store.Update(ctx, orgID, authtypes.NewStorableRoleFromRole(role))
}

func (provider *provider) PatchObjects(ctx context.Context, orgID valuer.UUID, name string, relation authtypes.Relation, additions, deletions []*authtypes.Object) error {

@@ -232,12 +231,12 @@ func (provider *provider) PatchObjects(ctx context.Context, orgID valuer.UUID, n
		return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

-	additionTuples, err := roletypes.GetAdditionTuples(name, orgID, relation, additions)
+	additionTuples, err := authtypes.GetAdditionTuples(name, orgID, relation, additions)
	if err != nil {
		return err
	}

-	deletionTuples, err := roletypes.GetDeletionTuples(name, orgID, relation, deletions)
+	deletionTuples, err := authtypes.GetDeletionTuples(name, orgID, relation, deletions)
	if err != nil {
		return err
	}

@@ -261,7 +260,7 @@ func (provider *provider) Delete(ctx context.Context, orgID valuer.UUID, id valu
		return err
	}

-	role := roletypes.NewRoleFromStorableRole(storableRole)
+	role := authtypes.NewRoleFromStorableRole(storableRole)
	err = role.ErrIfManaged()
	if err != nil {
		return err

@@ -271,7 +270,7 @@ func (provider *provider) Delete(ctx context.Context, orgID valuer.UUID, id valu
}

func (provider *provider) MustGetTypeables() []authtypes.Typeable {
-	return []authtypes.Typeable{authtypes.TypeableRole, roletypes.TypeableResourcesRoles}
+	return []authtypes.Typeable{authtypes.TypeableRole, authtypes.TypeableResourcesRoles}
}

func (provider *provider) getManagedRoleGrantTuples(orgID valuer.UUID, userID valuer.UUID) ([]*openfgav1.TupleKey, error) {

@@ -283,7 +282,7 @@ func (provider *provider) getManagedRoleGrantTuples(orgID valuer.UUID, userID va
		adminSubject,
		authtypes.RelationAssignee,
		[]authtypes.Selector{
-			authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
+			authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
		},
		orgID,
	)

@@ -298,7 +297,7 @@ func (provider *provider) getManagedRoleGrantTuples(orgID valuer.UUID, userID va
		anonymousSubject,
		authtypes.RelationAssignee,
		[]authtypes.Selector{
-			authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAnonymousRoleName),
+			authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAnonymousRoleName),
		},
		orgID,
	)
@@ -19,7 +19,6 @@ import (
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
	"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
	"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

@@ -224,7 +223,7 @@ func (module *module) MustGetTypeables() []authtypes.Typeable {

func (module *module) MustGetManagedRoleTransactions() map[string][]*authtypes.Transaction {
	return map[string][]*authtypes.Transaction{
-		roletypes.SigNozAnonymousRoleName: {
+		authtypes.SigNozAnonymousRoleName: {
			{
				ID:       valuer.GenerateUUID(),
				Relation: authtypes.RelationRead,
@@ -80,6 +80,21 @@ func TestManager_TestNotification_SendUnmatched_ThresholdRule(t *testing.T) {
			alertDataRows := cmock.NewRows(cols, tc.Values)

			mock := telemetryStore.Mock()
+			// Mock metadata queries for FetchTemporalityAndTypeMulti
+			// First query: fetchMetricsTemporalityAndType (from signoz_metrics time series table)
+			metadataCols := []cmock.ColumnType{
+				{Name: "metric_name", Type: "String"},
+				{Name: "temporality", Type: "String"},
+				{Name: "type", Type: "String"},
+				{Name: "is_monotonic", Type: "Bool"},
+			}
+			metadataRows := cmock.NewRows(metadataCols, [][]any{
+				{"probe_success", metrictypes.Unspecified, metrictypes.GaugeType, false},
+			})
+			mock.ExpectQuery("*distributed_time_series_v4*").WithArgs(nil, nil, nil).WillReturnRows(metadataRows)
+			// Second query: fetchMeterSourceMetricsTemporalityAndType (from signoz_meter table)
+			emptyMetadataRows := cmock.NewRows(metadataCols, [][]any{})
+			mock.ExpectQuery("*meter*").WithArgs(nil).WillReturnRows(emptyMetadataRows)

			// Generate query arguments for the metric query
			evalTime := time.Now().UTC()
@@ -11,6 +11,7 @@
		"prettify": "prettier --write .",
		"fmt": "prettier --check .",
		"lint": "eslint ./src",
+		"lint:generated": "eslint ./src/api/generated --fix",
		"lint:fix": "eslint ./src --fix",
		"jest": "jest",
		"jest:coverage": "jest --coverage",

@@ -283,4 +284,4 @@
		"tmp": "0.2.4",
		"vite": "npm:rolldown-vite@7.3.1"
	}
}
@@ -25,7 +25,7 @@ echo "\n✅ Prettier formatting successful"

# Fix linting issues
echo "\n\n---\nRunning eslint...\n"
-if ! yarn lint --fix --quiet src/api/generated; then
+if ! yarn lint:generated; then
	echo "ESLint check failed! Please fix linting errors before proceeding."
	exit 1
fi
@@ -21,6 +21,8 @@ import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
	AuthtypesPatchableObjectsDTO,
+	AuthtypesPatchableRoleDTO,
+	AuthtypesPostableRoleDTO,
	CreateRole201,
	DeleteRolePathParameters,
	GetObjects200,

@@ -31,8 +33,6 @@ import type {
	PatchObjectsPathParameters,
	PatchRolePathParameters,
	RenderErrorResponseDTO,
-	RoletypesPatchableRoleDTO,
-	RoletypesPostableRoleDTO,
} from '../sigNoz.schemas';

/**

@@ -118,14 +118,14 @@ export const invalidateListRoles = async (
 * @summary Create role
 */
export const createRole = (
-	roletypesPostableRoleDTO: BodyType<RoletypesPostableRoleDTO>,
+	authtypesPostableRoleDTO: BodyType<AuthtypesPostableRoleDTO>,
	signal?: AbortSignal,
) => {
	return GeneratedAPIInstance<CreateRole201>({
		url: `/api/v1/roles`,
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
-		data: roletypesPostableRoleDTO,
+		data: authtypesPostableRoleDTO,
		signal,
	});
};

@@ -137,13 +137,13 @@ export const getCreateRoleMutationOptions = <
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof createRole>>,
		TError,
-		{ data: BodyType<RoletypesPostableRoleDTO> },
+		{ data: BodyType<AuthtypesPostableRoleDTO> },
		TContext
	>;
}): UseMutationOptions<
	Awaited<ReturnType<typeof createRole>>,
	TError,
-	{ data: BodyType<RoletypesPostableRoleDTO> },
+	{ data: BodyType<AuthtypesPostableRoleDTO> },
	TContext
> => {
	const mutationKey = ['createRole'];

@@ -157,7 +157,7 @@ export const getCreateRoleMutationOptions = <

	const mutationFn: MutationFunction<
		Awaited<ReturnType<typeof createRole>>,
-		{ data: BodyType<RoletypesPostableRoleDTO> }
+		{ data: BodyType<AuthtypesPostableRoleDTO> }
	> = (props) => {
		const { data } = props ?? {};

@@ -170,7 +170,7 @@ export const getCreateRoleMutationOptions = <
export type CreateRoleMutationResult = NonNullable<
	Awaited<ReturnType<typeof createRole>>
>;
-export type CreateRoleMutationBody = BodyType<RoletypesPostableRoleDTO>;
+export type CreateRoleMutationBody = BodyType<AuthtypesPostableRoleDTO>;
export type CreateRoleMutationError = ErrorType<RenderErrorResponseDTO>;

/**

@@ -183,13 +183,13 @@ export const useCreateRole = <
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof createRole>>,
		TError,
-		{ data: BodyType<RoletypesPostableRoleDTO> },
+		{ data: BodyType<AuthtypesPostableRoleDTO> },
		TContext
	>;
}): UseMutationResult<
	Awaited<ReturnType<typeof createRole>>,
	TError,
-	{ data: BodyType<RoletypesPostableRoleDTO> },
+	{ data: BodyType<AuthtypesPostableRoleDTO> },
	TContext
> => {
	const mutationOptions = getCreateRoleMutationOptions(options);

@@ -370,13 +370,13 @@ export const invalidateGetRole = async (
 */
export const patchRole = (
	{ id }: PatchRolePathParameters,
-	roletypesPatchableRoleDTO: BodyType<RoletypesPatchableRoleDTO>,
+	authtypesPatchableRoleDTO: BodyType<AuthtypesPatchableRoleDTO>,
) => {
	return GeneratedAPIInstance<string>({
		url: `/api/v1/roles/${id}`,
		method: 'PATCH',
		headers: { 'Content-Type': 'application/json' },
-		data: roletypesPatchableRoleDTO,
+		data: authtypesPatchableRoleDTO,
	});
};

@@ -389,7 +389,7 @@ export const getPatchRoleMutationOptions = <
		TError,
		{
			pathParams: PatchRolePathParameters;
-			data: BodyType<RoletypesPatchableRoleDTO>;
+			data: BodyType<AuthtypesPatchableRoleDTO>;
		},
		TContext
	>;

@@ -398,7 +398,7 @@ export const getPatchRoleMutationOptions = <
	TError,
	{
		pathParams: PatchRolePathParameters;
-		data: BodyType<RoletypesPatchableRoleDTO>;
+		data: BodyType<AuthtypesPatchableRoleDTO>;
	},
	TContext
> => {

@@ -415,7 +415,7 @@ export const getPatchRoleMutationOptions = <
		Awaited<ReturnType<typeof patchRole>>,
		{
			pathParams: PatchRolePathParameters;
-			data: BodyType<RoletypesPatchableRoleDTO>;
+			data: BodyType<AuthtypesPatchableRoleDTO>;
		}
	> = (props) => {
		const { pathParams, data } = props ?? {};

@@ -429,7 +429,7 @@ export const getPatchRoleMutationOptions = <
export type PatchRoleMutationResult = NonNullable<
	Awaited<ReturnType<typeof patchRole>>
>;
-export type PatchRoleMutationBody = BodyType<RoletypesPatchableRoleDTO>;
+export type PatchRoleMutationBody = BodyType<AuthtypesPatchableRoleDTO>;
export type PatchRoleMutationError = ErrorType<RenderErrorResponseDTO>;

/**

@@ -444,7 +444,7 @@ export const usePatchRole = <
		TError,
		{
			pathParams: PatchRolePathParameters;
-			data: BodyType<RoletypesPatchableRoleDTO>;
+			data: BodyType<AuthtypesPatchableRoleDTO>;
		},
		TContext
	>;

@@ -453,7 +453,7 @@ export const usePatchRole = <
	TError,
	{
		pathParams: PatchRolePathParameters;
-		data: BodyType<RoletypesPatchableRoleDTO>;
+		data: BodyType<AuthtypesPatchableRoleDTO>;
	},
	TContext
> => {
@@ -278,6 +278,13 @@ export interface AuthtypesPatchableObjectsDTO {
	deletions: AuthtypesGettableObjectsDTO[] | null;
}

+export interface AuthtypesPatchableRoleDTO {
+	/**
+	 * @type string
+	 */
+	description: string;
+}
+
export interface AuthtypesPostableAuthDomainDTO {
	config?: AuthtypesAuthDomainConfigDTO;
	/**

@@ -301,6 +308,17 @@ export interface AuthtypesPostableEmailPasswordSessionDTO {
	password?: string;
}

+export interface AuthtypesPostableRoleDTO {
+	/**
+	 * @type string
+	 */
+	description?: string;
+	/**
+	 * @type string
+	 */
+	name: string;
+}
+
export interface AuthtypesPostableRotateTokenDTO {
	/**
	 * @type string

@@ -319,6 +337,39 @@ export interface AuthtypesResourceDTO {
	type: string;
}

+export interface AuthtypesRoleDTO {
+	/**
+	 * @type string
+	 * @format date-time
+	 */
+	createdAt?: Date;
+	/**
+	 * @type string
+	 */
+	description: string;
+	/**
+	 * @type string
+	 */
+	id: string;
+	/**
+	 * @type string
+	 */
+	name: string;
+	/**
+	 * @type string
+	 */
+	orgId: string;
+	/**
+	 * @type string
+	 */
+	type: string;
+	/**
+	 * @type string
+	 * @format date-time
+	 */
+	updatedAt?: Date;
+}
+
/**
 * @nullable
 */

@@ -2039,57 +2090,6 @@ export interface RenderErrorResponseDTO {
	status: string;
}

-export interface RoletypesPatchableRoleDTO {
-	/**
-	 * @type string
-	 */
-	description: string;
-}
-
-export interface RoletypesPostableRoleDTO {
-	/**
-	 * @type string
-	 */
-	description?: string;
-	/**
-	 * @type string
-	 */
-	name: string;
-}
-
-export interface RoletypesRoleDTO {
-	/**
-	 * @type string
-	 * @format date-time
-	 */
-	createdAt?: Date;
-	/**
-	 * @type string
-	 */
-	description: string;
-	/**
-	 * @type string
-	 */
-	id: string;
-	/**
-	 * @type string
-	 */
-	name: string;
-	/**
-	 * @type string
-	 */
-	orgId: string;
-	/**
-	 * @type string
-	 */
-	type: string;
-	/**
-	 * @type string
-	 * @format date-time
-	 */
-	updatedAt?: Date;
-}
-
export interface ServiceaccounttypesFactorAPIKeyDTO {
	/**
	 * @type string

@@ -3163,7 +3163,7 @@ export type ListRoles200 = {
	/**
	 * @type array
	 */
-	data: RoletypesRoleDTO[];
+	data: AuthtypesRoleDTO[];
	/**
	 * @type string
	 */

@@ -3185,7 +3185,7 @@ export type GetRolePathParameters = {
	id: string;
};
export type GetRole200 = {
-	data: RoletypesRoleDTO;
+	data: AuthtypesRoleDTO;
	/**
	 * @type string
	 */
@@ -13,8 +13,8 @@ import {
	usePatchRole,
} from 'api/generated/services/role';
import {
+	AuthtypesPostableRoleDTO,
	RenderErrorResponseDTO,
-	RoletypesPostableRoleDTO,
} from 'api/generated/services/sigNoz.schemas';
import { ErrorType } from 'api/generatedAPIInstance';
import ROUTES from 'constants/routes';

@@ -114,7 +114,7 @@ function CreateRoleModal({
			data: { description: values.description || '' },
		});
	} else {
-		const data: RoletypesPostableRoleDTO = {
+		const data: AuthtypesPostableRoleDTO = {
			name: values.name,
			...(values.description ? { description: values.description } : {}),
		};
@@ -2,7 +2,7 @@ import { useCallback, useEffect, useMemo } from 'react';
import { useHistory } from 'react-router-dom';
import { Pagination, Skeleton } from 'antd';
import { useListRoles } from 'api/generated/services/role';
-import { RoletypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
+import { AuthtypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
import ErrorInPlace from 'components/ErrorInPlace/ErrorInPlace';
import { DATE_TIME_FORMATS } from 'constants/dateTimeFormats';
import ROUTES from 'constants/routes';

@@ -20,7 +20,7 @@ const PAGE_SIZE = 20;

type DisplayItem =
	| { type: 'section'; label: string; count?: number }
-	| { type: 'role'; role: RoletypesRoleDTO };
+	| { type: 'role'; role: AuthtypesRoleDTO };

interface RolesListingTableProps {
	searchQuery: string;

@@ -187,7 +187,7 @@ function RolesListingTable({
	};

	// todo: use table from periscope when its available for consumption
-	const renderRow = (role: RoletypesRoleDTO): JSX.Element => (
+	const renderRow = (role: AuthtypesRoleDTO): JSX.Element => (
		<div
			key={role.id}
			className={`roles-table-row ${
@@ -1,8 +1,8 @@
-import { RoletypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
+import { AuthtypesRoleDTO } from 'api/generated/services/sigNoz.schemas';

const orgId = '019ba2bb-2fa1-7b24-8159-cfca08617ef9';

-export const managedRoles: RoletypesRoleDTO[] = [
+export const managedRoles: AuthtypesRoleDTO[] = [
	{
		id: '019c24aa-2248-756f-9833-984f1ab63819',
		createdAt: new Date('2026-02-03T18:00:55.624356Z'),

@@ -35,7 +35,7 @@ export const managedRoles: RoletypesRoleDTO[] = [
	},
];

-export const customRoles: RoletypesRoleDTO[] = [
+export const customRoles: AuthtypesRoleDTO[] = [
	{
		id: '019c24aa-3333-0001-aaaa-111111111111',
		createdAt: new Date('2026-02-10T10:30:00.000Z'),

@@ -56,7 +56,7 @@ export const customRoles: RoletypesRoleDTO[] = [
	},
];

-export const allRoles: RoletypesRoleDTO[] = [...managedRoles, ...customRoles];
+export const allRoles: AuthtypesRoleDTO[] = [...managedRoles, ...customRoles];

export const listRolesSuccessResponse = {
	status: 'success',
@@ -6,7 +6,6 @@ import (
	"github.com/SigNoz/signoz/pkg/http/handler"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/gorilla/mux"
)

@@ -16,7 +15,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
		Tags:                []string{"role"},
		Summary:             "Create role",
		Description:         "This endpoint creates a role",
-		Request:             new(roletypes.PostableRole),
+		Request:             new(authtypes.PostableRole),
		RequestContentType:  "",
		Response:            new(types.Identifiable),
		ResponseContentType: "application/json",

@@ -35,7 +34,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
		Description:         "This endpoint lists all roles",
		Request:             nil,
		RequestContentType:  "",
-		Response:            make([]*roletypes.Role, 0),
+		Response:            make([]*authtypes.Role, 0),
		ResponseContentType: "application/json",
		SuccessStatusCode:   http.StatusOK,
		ErrorStatusCodes:    []int{},

@@ -52,7 +51,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
		Description:         "This endpoint gets a role",
		Request:             nil,
		RequestContentType:  "",
-		Response:            new(roletypes.Role),
+		Response:            new(authtypes.Role),
		ResponseContentType: "application/json",
		SuccessStatusCode:   http.StatusOK,
		ErrorStatusCodes:    []int{},

@@ -84,7 +83,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
		Tags:                []string{"role"},
		Summary:             "Patch role",
		Description:         "This endpoint patches a role",
-		Request:             new(roletypes.PatchableRole),
+		Request:             new(authtypes.PatchableRole),
		RequestContentType:  "",
		Response:            nil,
		ResponseContentType: "application/json",
@@ -6,7 +6,6 @@ import (

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	openfgav1 "github.com/openfga/api/proto/openfga/v1"
)

@@ -30,10 +29,10 @@ type AuthZ interface {
	ListObjects(context.Context, string, authtypes.Relation, authtypes.Typeable) ([]*authtypes.Object, error)

	// Creates the role.
-	Create(context.Context, valuer.UUID, *roletypes.Role) error
+	Create(context.Context, valuer.UUID, *authtypes.Role) error

	// Gets the role if it exists or creates one.
-	GetOrCreate(context.Context, valuer.UUID, *roletypes.Role) (*roletypes.Role, error)
+	GetOrCreate(context.Context, valuer.UUID, *authtypes.Role) (*authtypes.Role, error)

	// Gets the objects associated with the given role and relation.
	GetObjects(context.Context, valuer.UUID, valuer.UUID, authtypes.Relation) ([]*authtypes.Object, error)

@@ -42,7 +41,7 @@ type AuthZ interface {
	GetResources(context.Context) []*authtypes.Resource

	// Patches the role.
-	Patch(context.Context, valuer.UUID, *roletypes.Role) error
+	Patch(context.Context, valuer.UUID, *authtypes.Role) error

	// Patches the objects in authorization server associated with the given role and relation
	PatchObjects(context.Context, valuer.UUID, string, authtypes.Relation, []*authtypes.Object, []*authtypes.Object) error

@@ -51,19 +50,19 @@ type AuthZ interface {
	Delete(context.Context, valuer.UUID, valuer.UUID) error

	// Gets the role
-	Get(context.Context, valuer.UUID, valuer.UUID) (*roletypes.Role, error)
+	Get(context.Context, valuer.UUID, valuer.UUID) (*authtypes.Role, error)

	// Gets the role by org_id and name
-	GetByOrgIDAndName(context.Context, valuer.UUID, string) (*roletypes.Role, error)
+	GetByOrgIDAndName(context.Context, valuer.UUID, string) (*authtypes.Role, error)

	// Lists all the roles for the organization.
-	List(context.Context, valuer.UUID) ([]*roletypes.Role, error)
+	List(context.Context, valuer.UUID) ([]*authtypes.Role, error)

	// Lists all the roles for the organization filtered by name
-	ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*roletypes.Role, error)
+	ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*authtypes.Role, error)

	// Lists all the roles for the organization filtered by ids
-	ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*roletypes.Role, error)
+	ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*authtypes.Role, error)

	// Grants a role to the subject based on role name.
	Grant(context.Context, valuer.UUID, []string, string) error

@@ -75,7 +74,7 @@ type AuthZ interface {
	ModifyGrant(context.Context, valuer.UUID, []string, []string, string) error

	// Bootstrap the managed roles.
-	CreateManagedRoles(context.Context, valuer.UUID, []*roletypes.Role) error
+	CreateManagedRoles(context.Context, valuer.UUID, []*authtypes.Role) error

	// Bootstrap managed roles transactions and user assignments
	CreateManagedUserRoleTransactions(context.Context, valuer.UUID, valuer.UUID) error
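After this change every role-shaped parameter and return value on AuthZ comes from authtypes. A minimal caller sketch, assuming an already-wired authz.AuthZ; it reuses only constructors that appear elsewhere in this diff (authtypes.NewRole in the HTTP handler, authtypes.MustNewSubject in the providers) and is illustrative, not code from the repository:

package example

import (
	"context"

	"github.com/SigNoz/signoz/pkg/authz"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// createAndGrant shows the post-migration call shapes; the role name and
// description are made up for the example.
func createAndGrant(ctx context.Context, svc authz.AuthZ, orgID, userID valuer.UUID) error {
	// Roles are now built via authtypes; these call sites previously imported roletypes.
	role := authtypes.NewRole("support-readonly", "read-only support role", authtypes.RoleTypeCustom, orgID)
	if err := svc.Create(ctx, orgID, role); err != nil {
		return err
	}

	// Grant takes role names plus a subject string, mirroring the
	// CreateManagedUserRoleTransactions call in the openfga provider below.
	subject := authtypes.MustNewSubject(authtypes.TypeableUser, userID.String(), orgID, nil)
	return svc.Grant(ctx, orgID, []string{"support-readonly"}, subject)
}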
@@ -5,7 +5,7 @@ import (

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

@@ -14,11 +14,11 @@ type store struct {
	sqlstore sqlstore.SQLStore
}

-func NewSqlAuthzStore(sqlstore sqlstore.SQLStore) roletypes.Store {
+func NewSqlAuthzStore(sqlstore sqlstore.SQLStore) authtypes.RoleStore {
	return &store{sqlstore: sqlstore}
}

-func (store *store) Create(ctx context.Context, role *roletypes.StorableRole) error {
+func (store *store) Create(ctx context.Context, role *authtypes.StorableRole) error {
	_, err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -32,8 +32,8 @@ func (store *store) Create(ctx context.Context, role *roletypes.StorableRole) er
	return nil
}

-func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*roletypes.StorableRole, error) {
-	role := new(roletypes.StorableRole)
+func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*authtypes.StorableRole, error) {
+	role := new(authtypes.StorableRole)
	err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -43,14 +43,14 @@ func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID)
		Where("id = ?", id).
		Scan(ctx)
	if err != nil {
-		return nil, store.sqlstore.WrapNotFoundErrf(err, roletypes.ErrCodeRoleNotFound, "role with id: %s doesn't exist", id)
+		return nil, store.sqlstore.WrapNotFoundErrf(err, authtypes.ErrCodeRoleNotFound, "role with id: %s doesn't exist", id)
	}

	return role, nil
}

-func (store *store) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*roletypes.StorableRole, error) {
-	role := new(roletypes.StorableRole)
+func (store *store) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.StorableRole, error) {
+	role := new(authtypes.StorableRole)
	err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -60,14 +60,14 @@ func (store *store) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, na
		Where("name = ?", name).
		Scan(ctx)
	if err != nil {
-		return nil, store.sqlstore.WrapNotFoundErrf(err, roletypes.ErrCodeRoleNotFound, "role with name: %s doesn't exist", name)
+		return nil, store.sqlstore.WrapNotFoundErrf(err, authtypes.ErrCodeRoleNotFound, "role with name: %s doesn't exist", name)
	}

	return role, nil
}

-func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.StorableRole, error) {
-	roles := make([]*roletypes.StorableRole, 0)
+func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.StorableRole, error) {
+	roles := make([]*authtypes.StorableRole, 0)
	err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -82,8 +82,8 @@ func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.S
	return roles, nil
}

-func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*roletypes.StorableRole, error) {
-	roles := make([]*roletypes.StorableRole, 0)
+func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.StorableRole, error) {
+	roles := make([]*authtypes.StorableRole, 0)
	err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -99,7 +99,7 @@ func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID,
	if len(roles) != len(names) {
		return nil, store.sqlstore.WrapNotFoundErrf(
			nil,
-			roletypes.ErrCodeRoleNotFound,
+			authtypes.ErrCodeRoleNotFound,
			"not all roles found for the provided names: %v", names,
		)
	}

@@ -107,8 +107,8 @@ func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID,
	return roles, nil
}

-func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*roletypes.StorableRole, error) {
-	roles := make([]*roletypes.StorableRole, 0)
+func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.StorableRole, error) {
+	roles := make([]*authtypes.StorableRole, 0)
	err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -124,7 +124,7 @@ func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, id
	if len(roles) != len(ids) {
		return nil, store.sqlstore.WrapNotFoundErrf(
			nil,
-			roletypes.ErrCodeRoleNotFound,
+			authtypes.ErrCodeRoleNotFound,
			"not all roles found for the provided ids: %v", ids,
		)
	}

@@ -132,7 +132,7 @@ func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, id
	return roles, nil
}

-func (store *store) Update(ctx context.Context, orgID valuer.UUID, role *roletypes.StorableRole) error {
+func (store *store) Update(ctx context.Context, orgID valuer.UUID, role *authtypes.StorableRole) error {
	_, err := store.
		sqlstore.
		BunDBCtx(ctx).

@@ -153,12 +153,12 @@ func (store *store) Delete(ctx context.Context, orgID valuer.UUID, id valuer.UUI
		sqlstore.
		BunDBCtx(ctx).
		NewDelete().
-		Model(new(roletypes.StorableRole)).
+		Model(new(authtypes.StorableRole)).
		Where("org_id = ?", orgID).
		Where("id = ?", id).
		Exec(ctx)
	if err != nil {
-		return store.sqlstore.WrapNotFoundErrf(err, roletypes.ErrCodeRoleNotFound, "role with id %s doesn't exist", id)
+		return store.sqlstore.WrapNotFoundErrf(err, authtypes.ErrCodeRoleNotFound, "role with id %s doesn't exist", id)
	}

	return nil
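The store now satisfies authtypes.RoleStore rather than roletypes.Store. Read off the methods above, plus the RunInTx used by the openfga provider below, the implied contract looks roughly like this; a sketch only, since the authoritative definition lives in pkg/types/authtypes and may differ:

package example

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// roleStore sketches the authtypes.RoleStore contract as implied by this diff;
// it is not copied from the repository.
type roleStore interface {
	Create(ctx context.Context, role *authtypes.StorableRole) error
	Get(ctx context.Context, orgID, id valuer.UUID) (*authtypes.StorableRole, error)
	GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.StorableRole, error)
	List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.StorableRole, error)
	ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.StorableRole, error)
	ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.StorableRole, error)
	Update(ctx context.Context, orgID valuer.UUID, role *authtypes.StorableRole) error
	Delete(ctx context.Context, orgID, id valuer.UUID) error
	// RunInTx wraps fn in a transaction; used by CreateManagedRoles below.
	RunInTx(ctx context.Context, fn func(ctx context.Context) error) error
}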
@@ -8,7 +8,6 @@ import (
	"github.com/SigNoz/signoz/pkg/authz/openfgaserver"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"

	"github.com/SigNoz/signoz/pkg/factory"

@@ -19,7 +18,7 @@ import (

type provider struct {
	server *openfgaserver.Server
-	store  roletypes.Store
+	store  authtypes.RoleStore
}

func NewProviderFactory(sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) factory.ProviderFactory[authz.AuthZ, authz.Config] {

@@ -68,61 +67,61 @@ func (provider *provider) ListObjects(ctx context.Context, subject string, relat
	return provider.server.ListObjects(ctx, subject, relation, typeable)
}

-func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*roletypes.Role, error) {
+func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*authtypes.Role, error) {
	storableRole, err := provider.store.Get(ctx, orgID, id)
	if err != nil {
		return nil, err
	}

-	return roletypes.NewRoleFromStorableRole(storableRole), nil
+	return authtypes.NewRoleFromStorableRole(storableRole), nil
}

-func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*roletypes.Role, error) {
+func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.Role, error) {
	storableRole, err := provider.store.GetByOrgIDAndName(ctx, orgID, name)
	if err != nil {
		return nil, err
	}

-	return roletypes.NewRoleFromStorableRole(storableRole), nil
+	return authtypes.NewRoleFromStorableRole(storableRole), nil
}

-func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.Role, error) {
+func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.Role, error) {
	storableRoles, err := provider.store.List(ctx, orgID)
	if err != nil {
		return nil, err
	}

-	roles := make([]*roletypes.Role, len(storableRoles))
+	roles := make([]*authtypes.Role, len(storableRoles))
	for idx, storableRole := range storableRoles {
-		roles[idx] = roletypes.NewRoleFromStorableRole(storableRole)
+		roles[idx] = authtypes.NewRoleFromStorableRole(storableRole)
	}

	return roles, nil
}

-func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*roletypes.Role, error) {
+func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.Role, error) {
	storableRoles, err := provider.store.ListByOrgIDAndNames(ctx, orgID, names)
	if err != nil {
		return nil, err
	}

-	roles := make([]*roletypes.Role, len(storableRoles))
+	roles := make([]*authtypes.Role, len(storableRoles))
	for idx, storable := range storableRoles {
-		roles[idx] = roletypes.NewRoleFromStorableRole(storable)
+		roles[idx] = authtypes.NewRoleFromStorableRole(storable)
	}

	return roles, nil
}

-func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*roletypes.Role, error) {
+func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.Role, error) {
	storableRoles, err := provider.store.ListByOrgIDAndIDs(ctx, orgID, ids)
	if err != nil {
		return nil, err
	}

-	roles := make([]*roletypes.Role, len(storableRoles))
+	roles := make([]*authtypes.Role, len(storableRoles))
	for idx, storable := range storableRoles {
-		roles[idx] = roletypes.NewRoleFromStorableRole(storable)
+		roles[idx] = authtypes.NewRoleFromStorableRole(storable)
	}

	return roles, nil

@@ -179,10 +178,10 @@ func (provider *provider) Revoke(ctx context.Context, orgID valuer.UUID, names [
	return provider.Write(ctx, nil, tuples)
}

-func (provider *provider) CreateManagedRoles(ctx context.Context, _ valuer.UUID, managedRoles []*roletypes.Role) error {
+func (provider *provider) CreateManagedRoles(ctx context.Context, _ valuer.UUID, managedRoles []*authtypes.Role) error {
	err := provider.store.RunInTx(ctx, func(ctx context.Context) error {
		for _, role := range managedRoles {
-			err := provider.store.Create(ctx, roletypes.NewStorableRoleFromRole(role))
+			err := provider.store.Create(ctx, authtypes.NewStorableRoleFromRole(role))
			if err != nil {
				return err
			}

@@ -199,15 +198,15 @@ func (provider *provider) CreateManagedRoles(ctx context.Context, _ valuer.UUID,
}

func (provider *provider) CreateManagedUserRoleTransactions(ctx context.Context, orgID valuer.UUID, userID valuer.UUID) error {
-	return provider.Grant(ctx, orgID, []string{roletypes.SigNozAdminRoleName}, authtypes.MustNewSubject(authtypes.TypeableUser, userID.String(), orgID, nil))
+	return provider.Grant(ctx, orgID, []string{authtypes.SigNozAdminRoleName}, authtypes.MustNewSubject(authtypes.TypeableUser, userID.String(), orgID, nil))
}

-func (setter *provider) Create(_ context.Context, _ valuer.UUID, _ *roletypes.Role) error {
-	return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
+func (setter *provider) Create(_ context.Context, _ valuer.UUID, _ *authtypes.Role) error {
+	return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
}

-func (provider *provider) GetOrCreate(_ context.Context, _ valuer.UUID, _ *roletypes.Role) (*roletypes.Role, error) {
-	return nil, errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
+func (provider *provider) GetOrCreate(_ context.Context, _ valuer.UUID, _ *authtypes.Role) (*authtypes.Role, error) {
+	return nil, errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
}

func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource {

@@ -215,19 +214,19 @@ func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource
}

func (provider *provider) GetObjects(ctx context.Context, orgID valuer.UUID, id valuer.UUID, relation authtypes.Relation) ([]*authtypes.Object, error) {
-	return nil, errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
+	return nil, errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
}

-func (provider *provider) Patch(_ context.Context, _ valuer.UUID, _ *roletypes.Role) error {
-	return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
+func (provider *provider) Patch(_ context.Context, _ valuer.UUID, _ *authtypes.Role) error {
+	return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
}

func (provider *provider) PatchObjects(_ context.Context, _ valuer.UUID, _ string, _ authtypes.Relation, _, _ []*authtypes.Object) error {
-	return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
+	return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
}

func (provider *provider) Delete(_ context.Context, _ valuer.UUID, _ valuer.UUID) error {
-	return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
+	return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
}

func (provider *provider) MustGetTypeables() []authtypes.Typeable {
@@ -9,7 +9,6 @@ import (
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/gorilla/mux"
)

@@ -30,13 +29,13 @@ func (handler *handler) Create(rw http.ResponseWriter, r *http.Request) {
		return
	}

-	req := new(roletypes.PostableRole)
+	req := new(authtypes.PostableRole)
	if err := binding.JSON.BindBody(r.Body, req); err != nil {
		render.Error(rw, err)
		return
	}

-	role := roletypes.NewRole(req.Name, req.Description, roletypes.RoleTypeCustom, valuer.MustNewUUID(claims.OrgID))
+	role := authtypes.NewRole(req.Name, req.Description, authtypes.RoleTypeCustom, valuer.MustNewUUID(claims.OrgID))
	err = handler.authz.Create(ctx, valuer.MustNewUUID(claims.OrgID), role)
	if err != nil {
		render.Error(rw, err)

@@ -56,7 +55,7 @@ func (handler *handler) Get(rw http.ResponseWriter, r *http.Request) {

	id, ok := mux.Vars(r)["id"]
	if !ok {
-		render.Error(rw, errors.New(errors.TypeInvalidInput, roletypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
+		render.Error(rw, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
		return
	}
	roleID, err := valuer.NewUUID(id)

@@ -84,7 +83,7 @@ func (handler *handler) GetObjects(rw http.ResponseWriter, r *http.Request) {

	id, ok := mux.Vars(r)["id"]
	if !ok {
-		render.Error(rw, errors.New(errors.TypeInvalidInput, roletypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
+		render.Error(rw, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
		return
	}
	roleID, err := valuer.NewUUID(id)

@@ -95,7 +94,7 @@ func (handler *handler) GetObjects(rw http.ResponseWriter, r *http.Request) {

	relationStr, ok := mux.Vars(r)["relation"]
	if !ok {
-		render.Error(rw, errors.New(errors.TypeInvalidInput, roletypes.ErrCodeRoleInvalidInput, "relation is missing from the request"))
+		render.Error(rw, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeRoleInvalidInput, "relation is missing from the request"))
		return
	}
	relation, err := authtypes.NewRelation(relationStr)

@@ -150,7 +149,7 @@ func (handler *handler) Patch(rw http.ResponseWriter, r *http.Request) {
		return
	}

-	req := new(roletypes.PatchableRole)
+	req := new(authtypes.PatchableRole)
	if err := binding.JSON.BindBody(r.Body, req); err != nil {
		render.Error(rw, err)
		return
@@ -10,7 +10,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/ctxtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/gorilla/mux"
 )

@@ -56,9 +55,9 @@ func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
 	}

 	selectors := []authtypes.Selector{
-		authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
-		authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozEditorRoleName),
-		authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozViewerRoleName),
+		authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
+		authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozEditorRoleName),
+		authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozViewerRoleName),
 	}

 	err = middleware.authzService.CheckWithTupleCreation(

@@ -108,8 +107,8 @@ func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
 	}

 	selectors := []authtypes.Selector{
-		authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
-		authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozEditorRoleName),
+		authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
+		authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozEditorRoleName),
 	}

 	err = middleware.authzService.CheckWithTupleCreation(

@@ -159,7 +158,7 @@ func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
 	}

 	selectors := []authtypes.Selector{
-		authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
+		authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
 	}

 	err = middleware.authzService.CheckWithTupleCreation(
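The rename leaves the tiered selector sets intact: each access level accepts its own managed role plus every stronger one. A minimal sketch of that hierarchy, with illustrative role-name strings (the real constants live in authtypes):

package main

import "fmt"

// Illustrative role names; the actual constants are
// authtypes.SigNozAdminRoleName and friends.
const (
	adminRole  = "signoz-admin"
	editorRole = "signoz-editor"
	viewerRole = "signoz-viewer"
)

// selectorsFor mirrors the ViewAccess/EditAccess/AdminAccess middleware:
// viewer endpoints accept all three managed roles, editor endpoints two,
// admin endpoints only the admin role.
func selectorsFor(level string) []string {
	switch level {
	case "view":
		return []string{adminRole, editorRole, viewerRole}
	case "edit":
		return []string{adminRole, editorRole}
	default: // "admin"
		return []string{adminRole}
	}
}

func main() {
	fmt.Println(selectorsFor("edit")) // [signoz-admin signoz-editor]
}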
@@ -27,7 +27,12 @@ type OrgConfig struct {
 }

 type PasswordConfig struct {
-	Reset ResetConfig `mapstructure:"reset"`
+	Invite InviteConfig `mapstructure:"invite"`
+	Reset  ResetConfig  `mapstructure:"reset"`
+}
+
+type InviteConfig struct {
+	MaxTokenLifetime time.Duration `mapstructure:"max_token_lifetime"`
 }

 type ResetConfig struct {

@@ -46,6 +51,9 @@ func newConfig() factory.Config {
 			AllowSelf:        false,
 			MaxTokenLifetime: 6 * time.Hour,
 		},
+		Invite: InviteConfig{
+			MaxTokenLifetime: 48 * time.Hour,
+		},
 	},
 	Root: RootConfig{
 		Enabled: false,

@@ -61,6 +69,10 @@ func (c Config) Validate() error {
 		return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::reset::max_token_lifetime must be positive")
 	}

+	if c.Password.Invite.MaxTokenLifetime <= 0 {
+		return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::invite::max_token_lifetime must be positive")
+	}
+
 	if c.Root.Enabled {
 		if c.Root.Email.IsZero() {
 			return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::email is required when root user is enabled")
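For operators, the invite lifetime is now a knob separate from the reset lifetime. A sketch of the YAML implied by the mapstructure tags above, shown with the shipped defaults (6h reset, 48h invite); the top-level nesting under user: is an assumption inferred from the user::password::... error strings:

user:
  password:
    reset:
      max_token_lifetime: 6h
    invite:
      max_token_lifetime: 48h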
@@ -19,7 +19,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/emailtypes"
 	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/dustin/go-humanize"
 )

@@ -204,7 +203,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
 	resetLink := userWithToken.ResetPasswordToken.FactorPasswordResetLink(frontendBaseUrl)

-	tokenLifetime := m.config.Password.Reset.MaxTokenLifetime
+	tokenLifetime := m.config.Password.Invite.MaxTokenLifetime
 	humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))

 	if err := m.emailing.SendHTML(ctx, userWithToken.User.Email.String(), "You're Invited to Join SigNoz", emailtypes.TemplateNameInvitationEmail, map[string]any{

@@ -263,7 +262,7 @@ func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ..
 	createUserOpts := root.NewCreateUserOptions(opts...)

 	// since assign is idempotent, multiple calls to assign won't cause issues in case of retries.
-	err := module.authz.Grant(ctx, input.OrgID, []string{roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
+	err := module.authz.Grant(ctx, input.OrgID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(input.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
 	if err != nil {
 		return err
 	}

@@ -333,8 +332,8 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
 	if user.Role != "" && user.Role != existingUser.Role {
 		err = m.authz.ModifyGrant(ctx,
 			orgID,
-			[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role)},
-			[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
+			[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role)},
+			[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
 			authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil),
 		)
 		if err != nil {

@@ -395,7 +394,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
 	}

 	// since revoke is idempotent, multiple calls to revoke won't cause issues in case of retries
-	err = module.authz.Revoke(ctx, orgID, []string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
+	err = module.authz.Revoke(ctx, orgID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
 	if err != nil {
 		return err
 	}

@@ -461,7 +460,11 @@ func (module *Module) GetOrCreateResetPasswordToken(ctx context.Context, userID
 	}

 	// create a new token
-	resetPasswordToken, err := types.NewResetPasswordToken(password.ID, time.Now().Add(module.config.Password.Reset.MaxTokenLifetime))
+	tokenLifetime := module.config.Password.Reset.MaxTokenLifetime
+	if user.Status == types.UserStatusPendingInvite {
+		tokenLifetime = module.config.Password.Invite.MaxTokenLifetime
+	}
+	resetPasswordToken, err := types.NewResetPasswordToken(password.ID, time.Now().Add(tokenLifetime))
 	if err != nil {
 		return nil, err
 	}

@@ -501,6 +504,9 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
 	resetLink := token.FactorPasswordResetLink(frontendBaseURL)

 	tokenLifetime := module.config.Password.Reset.MaxTokenLifetime
+	if user.Status == types.UserStatusPendingInvite {
+		tokenLifetime = module.config.Password.Invite.MaxTokenLifetime
+	}
 	humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))

 	if err := module.emailing.SendHTML(

@@ -558,7 +564,7 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
 	if err = module.authz.Grant(
 		ctx,
 		user.OrgID,
-		[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
+		[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
 		authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
 	); err != nil {
 		return err

@@ -692,7 +698,7 @@ func (module *Module) CreateFirstUser(ctx context.Context, organization *types.O
 		return nil, err
 	}

-	managedRoles := roletypes.NewManagedRoles(organization.ID)
+	managedRoles := authtypes.NewManagedRoles(organization.ID)
 	err = module.authz.CreateManagedUserRoleTransactions(ctx, organization.ID, user.ID)
 	if err != nil {
 		return nil, err

@@ -793,7 +799,7 @@ func (module *Module) activatePendingUser(ctx context.Context, user *types.User)
 	err := module.authz.Grant(
 		ctx,
 		user.OrgID,
-		[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
+		[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
 		authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
 	)
 	if err != nil {
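The same pending-invite branch now appears in both GetOrCreateResetPasswordToken and ForgotPassword. A minimal runnable sketch of the selection rule, with stand-in types for the config and user status (the real ones live in the user module and types package):

package main

import (
	"fmt"
	"time"
)

// Minimal stand-ins for the config and user types touched by this diff.
type PasswordConfig struct {
	InviteLifetime time.Duration // user::password::invite::max_token_lifetime
	ResetLifetime  time.Duration // user::password::reset::max_token_lifetime
}

type UserStatus string

const UserStatusPendingInvite UserStatus = "pending_invite"

// tokenLifetime mirrors the added branch: invited users get the (longer)
// invite lifetime, everyone else keeps the reset lifetime.
func tokenLifetime(cfg PasswordConfig, status UserStatus) time.Duration {
	if status == UserStatusPendingInvite {
		return cfg.InviteLifetime
	}
	return cfg.ResetLifetime
}

func main() {
	cfg := PasswordConfig{InviteLifetime: 48 * time.Hour, ResetLifetime: 6 * time.Hour}
	fmt.Println(tokenLifetime(cfg, UserStatusPendingInvite)) // 48h0m0s
	fmt.Println(tokenLifetime(cfg, "active"))                // 6h0m0s
}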
@@ -11,7 +11,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/user"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 )

@@ -159,8 +158,8 @@ func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID
 	if oldRole != types.RoleAdmin {
 		if err := s.authz.ModifyGrant(ctx,
 			orgID,
-			[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(oldRole)},
-			[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin)},
+			[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(oldRole)},
+			[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin)},
 			authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), orgID, nil),
 		); err != nil {
 			return err
@@ -69,6 +69,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
 		key string // deterministic join of label values
 	}
 	seriesMap := map[sKey]*qbtypes.TimeSeries{}
+	var keyOrder []sKey // preserves ClickHouse row-arrival order

 	stepMs := uint64(step.Duration.Milliseconds())

@@ -219,6 +220,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
 		if !ok {
 			series = &qbtypes.TimeSeries{Labels: lblObjs}
 			seriesMap[key] = series
+			keyOrder = append(keyOrder, key)
 		}
 		series.Values = append(series.Values, &qbtypes.TimeSeriesValue{
 			Timestamp: ts,

@@ -250,8 +252,8 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
 			Alias: "__result_" + strconv.Itoa(i),
 		}
 	}
-	for k, s := range seriesMap {
-		buckets[k.agg].Series = append(buckets[k.agg].Series, s)
+	for _, k := range keyOrder {
+		buckets[k.agg].Series = append(buckets[k.agg].Series, seriesMap[k])
 	}

 	var nonEmpty []*qbtypes.AggregationBucket
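The keyOrder slice exists because ranging over a Go map is deliberately randomized, which was shuffling series that ClickHouse had already returned in ORDER BY order. A self-contained sketch of the pattern:

package main

import "fmt"

func main() {
	seriesMap := map[string][]float64{}
	var keyOrder []string // remembers first-insertion (row-arrival) order

	for _, key := range []string{"b", "a", "c", "a"} {
		if _, ok := seriesMap[key]; !ok {
			keyOrder = append(keyOrder, key)
		}
		seriesMap[key] = append(seriesMap[key], 1)
	}

	// Ranging over seriesMap directly would yield a randomized order;
	// draining via keyOrder keeps rows in arrival order.
	for _, k := range keyOrder {
		fmt.Println(k, seriesMap[k])
	}
	// Output: b [1], then a [1 1], then c [1]
}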
@@ -185,22 +185,6 @@ func postProcessMetricQuery(
 	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
 	req *qbtypes.QueryRangeRequest,
 ) *qbtypes.Result {
-
-	config := query.Aggregations[0]
-	spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
-	timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
-	timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
-
-	for idx := range query.Order {
-		if query.Order[idx].Key.Name == spaceAggOrderBy ||
-			query.Order[idx].Key.Name == timeAggOrderBy ||
-			query.Order[idx].Key.Name == timeSpaceAggOrderBy {
-			query.Order[idx].Key.Name = qbtypes.DefaultOrderByKey
-		}
-	}
-
-	result = q.applySeriesLimit(result, query.Limit, query.Order)
 	if len(query.Functions) > 0 {
 		step := query.StepInterval.Duration.Milliseconds()
 		functions := q.prepareFillZeroArgsWithStep(query.Functions, req, step)
@@ -20,6 +20,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
 	"github.com/SigNoz/signoz/pkg/types/metrictypes"
 	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
+	"github.com/dustin/go-humanize"
 	"golang.org/x/exp/maps"

 	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"

@@ -158,7 +159,8 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 	metricNames := make([]string, 0)
 	for idx, query := range req.CompositeQuery.Queries {
 		event.QueryType = query.Type.StringValue()
-		if query.Type == qbtypes.QueryTypeBuilder {
+		switch query.Type {
+		case qbtypes.QueryTypeBuilder:
 			if spec, ok := query.Spec.(qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]); ok {
 				for _, agg := range spec.Aggregations {
 					if agg.MetricName != "" {

@@ -236,7 +238,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 				}
 				req.CompositeQuery.Queries[idx].Spec = spec
 			}
-		} else if query.Type == qbtypes.QueryTypePromQL {
+		case qbtypes.QueryTypePromQL:
 			event.MetricsUsed = true
 			switch spec := query.Spec.(type) {
 			case qbtypes.PromQuery:

@@ -247,7 +249,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 				}
 				req.CompositeQuery.Queries[idx].Spec = spec
 			}
-		} else if query.Type == qbtypes.QueryTypeClickHouseSQL {
+		case qbtypes.QueryTypeClickHouseSQL:
 			switch spec := query.Spec.(type) {
 			case qbtypes.ClickHouseQuery:
 				if strings.TrimSpace(spec.Query) != "" {

@@ -256,7 +258,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 					event.TracesUsed = strings.Contains(spec.Query, "signoz_traces")
 				}
 			}
-		} else if query.Type == qbtypes.QueryTypeTraceOperator {
+		case qbtypes.QueryTypeTraceOperator:
 			if spec, ok := query.Spec.(qbtypes.QueryBuilderTraceOperator); ok {
 				if spec.StepInterval.Seconds() == 0 {
 					spec.StepInterval = qbtypes.Step{

@@ -276,23 +278,9 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 		}
 	}

-	// Fetch temporality for all metrics at once
-	var metricTemporality map[string]metrictypes.Temporality
-	var metricTypes map[string]metrictypes.Type
-	if len(metricNames) > 0 {
-		var err error
-		metricTemporality, metricTypes, err = q.metadataStore.FetchTemporalityAndTypeMulti(ctx, req.Start, req.End, metricNames...)
-		if err != nil {
-			q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
-			// Continue without temporality - statement builder will handle unspecified
-			metricTemporality = make(map[string]metrictypes.Temporality)
-			metricTypes = make(map[string]metrictypes.Type)
-		}
-		q.logger.DebugContext(ctx, "fetched metric temporalities and types", "metric_temporality", metricTemporality, "metric_types", metricTypes)
-	}
-
 	queries := make(map[string]qbtypes.Query)
 	steps := make(map[string]qbtypes.Step)
+	missingMetrics := []string{}

 	for _, query := range req.CompositeQuery.Queries {
 		var queryName string

@@ -374,15 +362,26 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 			queries[spec.Name] = bq
 			steps[spec.Name] = spec.StepInterval
 		case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
+			var metricTemporality map[string]metrictypes.Temporality
+			var metricTypes map[string]metrictypes.Type
+			if len(metricNames) > 0 {
+				var err error
+				metricTemporality, metricTypes, err = q.metadataStore.FetchTemporalityAndTypeMulti(ctx, req.Start, req.End, metricNames...)
+				if err != nil {
+					q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
+					return nil, errors.NewInternalf(errors.CodeInternal, "failed to fetch metrics temporality")
+				}
+				q.logger.DebugContext(ctx, "fetched metric temporalities and types", "metric_temporality", metricTemporality, "metric_types", metricTypes)
+			}
 			for i := range spec.Aggregations {
 				if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Temporality == metrictypes.Unknown {
 					if temp, ok := metricTemporality[spec.Aggregations[i].MetricName]; ok && temp != metrictypes.Unknown {
 						spec.Aggregations[i].Temporality = temp
 					}
 				}
-				// TODO(srikanthccv): warn when the metric is missing
 				if spec.Aggregations[i].Temporality == metrictypes.Unknown {
 					spec.Aggregations[i].Temporality = metrictypes.Unspecified
+					missingMetrics = append(missingMetrics, spec.Aggregations[i].MetricName)
+					continue
 				}

 				if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Type == metrictypes.UnspecifiedType {

@@ -409,6 +408,24 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 			}
 		}
 	}
+	if len(missingMetrics) > 0 {
+		lastSeenInfo, _ := q.metadataStore.FetchLastSeenInfoMulti(ctx, missingMetrics...)
+		lastSeenStr := func(name string) string {
+			if ts, ok := lastSeenInfo[name]; ok && ts > 0 {
+				ago := humanize.RelTime(time.UnixMilli(ts), time.Now(), "ago", "from now")
+				return fmt.Sprintf("%s (last seen %s)", name, ago)
+			}
+			return name
+		}
+		if len(missingMetrics) == 1 {
+			return nil, errors.NewNotFoundf(errors.CodeNotFound, "no data found for the metric %s in the query time range", lastSeenStr(missingMetrics[0]))
+		}
+		parts := make([]string, len(missingMetrics))
+		for i, m := range missingMetrics {
+			parts[i] = lastSeenStr(m)
+		}
+		return nil, errors.NewNotFoundf(errors.CodeNotFound, "no data found for the following metrics in the query time range: %s", strings.Join(parts, ", "))
+	}
 	qbResp, qbErr := q.run(ctx, orgID, queries, req, steps, event)
 	if qbResp != nil {
 		qbResp.QBEvent = event

@@ -663,7 +680,7 @@ func (q *querier) run(
 	}

 // executeWithCache executes a query using the bucket cache
-func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query qbtypes.Query, step qbtypes.Step, noCache bool) (*qbtypes.Result, error) {
+func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query qbtypes.Query, step qbtypes.Step, _ bool) (*qbtypes.Result, error) {
 	// Get cached data and missing ranges
 	cachedResult, missingRanges := q.bucketCache.GetMissRanges(ctx, orgID, query, step)
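The new not-found error decorates each missing metric with a humanized last-seen timestamp. A small sketch of the humanize.RelTime call it relies on:

package main

import (
	"fmt"
	"time"

	"github.com/dustin/go-humanize"
)

func main() {
	lastSeen := time.Now().Add(-3 * 24 * time.Hour)
	// With the first argument in the past, RelTime picks the "ago" label,
	// producing e.g. "3 days ago".
	ago := humanize.RelTime(lastSeen, time.Now(), "ago", "from now")
	fmt.Printf("no data found for the metric %s (last seen %s)\n", "signoz_calls_total", ago)
}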
@@ -76,6 +76,21 @@ func TestManager_TestNotification_SendUnmatched_ThresholdRule(t *testing.T) {
 	alertDataRows := cmock.NewRows(cols, tc.Values)

 	mock := mockStore.Mock()
+	// Mock metadata queries for FetchTemporalityAndTypeMulti
+	// First query: fetchMetricsTemporalityAndType (from signoz_metrics time series table)
+	metadataCols := []cmock.ColumnType{
+		{Name: "metric_name", Type: "String"},
+		{Name: "temporality", Type: "String"},
+		{Name: "type", Type: "String"},
+		{Name: "is_monotonic", Type: "Bool"},
+	}
+	metadataRows := cmock.NewRows(metadataCols, [][]any{
+		{"probe_success", metrictypes.Unspecified, metrictypes.GaugeType, false},
+	})
+	mock.ExpectQuery("*distributed_time_series_v4*").WithArgs(nil, nil, nil).WillReturnRows(metadataRows)
+	// Second query: fetchMeterSourceMetricsTemporalityAndType (from signoz_meter table)
+	emptyMetadataRows := cmock.NewRows(metadataCols, [][]any{})
+	mock.ExpectQuery("*meter*").WithArgs(nil).WillReturnRows(emptyMetadataRows)

 	// Generate query arguments for the metric query
 	evalTime := time.Now().UTC()
@@ -7,7 +7,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/sqlschema"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"

@@ -54,7 +54,7 @@ func (migration *addManagedRoles) Up(ctx context.Context, db *bun.DB) error {
 		return err
 	}

-	managedRoles := []*roletypes.StorableRole{}
+	managedRoles := []*authtypes.StorableRole{}
 	for _, orgIDStr := range orgIDs {
 		orgID, err := valuer.NewUUID(orgIDStr)
 		if err != nil {

@@ -62,20 +62,20 @@ func (migration *addManagedRoles) Up(ctx context.Context, db *bun.DB) error {
 		}

 		// signoz admin
-		signozAdminRole := roletypes.NewRole(roletypes.SigNozAdminRoleName, roletypes.SigNozAdminRoleDescription, roletypes.RoleTypeManaged, orgID)
-		managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozAdminRole))
+		signozAdminRole := authtypes.NewRole(authtypes.SigNozAdminRoleName, authtypes.SigNozAdminRoleDescription, authtypes.RoleTypeManaged, orgID)
+		managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozAdminRole))

 		// signoz editor
-		signozEditorRole := roletypes.NewRole(roletypes.SigNozEditorRoleName, roletypes.SigNozEditorRoleDescription, roletypes.RoleTypeManaged, orgID)
-		managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozEditorRole))
+		signozEditorRole := authtypes.NewRole(authtypes.SigNozEditorRoleName, authtypes.SigNozEditorRoleDescription, authtypes.RoleTypeManaged, orgID)
+		managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozEditorRole))

 		// signoz viewer
-		signozViewerRole := roletypes.NewRole(roletypes.SigNozViewerRoleName, roletypes.SigNozViewerRoleDescription, roletypes.RoleTypeManaged, orgID)
-		managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozViewerRole))
+		signozViewerRole := authtypes.NewRole(authtypes.SigNozViewerRoleName, authtypes.SigNozViewerRoleDescription, authtypes.RoleTypeManaged, orgID)
+		managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozViewerRole))

 		// signoz anonymous
-		signozAnonymousRole := roletypes.NewRole(roletypes.SigNozAnonymousRoleName, roletypes.SigNozAnonymousRoleDescription, roletypes.RoleTypeManaged, orgID)
-		managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozAnonymousRole))
+		signozAnonymousRole := authtypes.NewRole(authtypes.SigNozAnonymousRoleName, authtypes.SigNozAnonymousRoleDescription, authtypes.RoleTypeManaged, orgID)
+		managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozAnonymousRole))
 	}

 	if len(managedRoles) > 0 {
@@ -6,7 +6,7 @@ import (

 	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
-	"github.com/SigNoz/signoz/pkg/types/roletypes"
+	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/oklog/ulid/v2"
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/dialect"

@@ -83,7 +83,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
 		INSERT INTO tuple (store, object_type, object_id, relation, _user, user_type, ulid, inserted_at)
 		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
 		ON CONFLICT (store, object_type, object_id, relation, _user) DO NOTHING`,
-		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName+"#assignee", "userset", tupleID, now,
+		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName+"#assignee", "userset", tupleID, now,
 	)
 	if err != nil {
 		return err

@@ -102,7 +102,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
 		INSERT INTO changelog (store, object_type, object_id, relation, _user, operation, ulid, inserted_at)
 		VALUES (?, ?, ?, ?, ?, ?, ?, ?)
 		ON CONFLICT (store, ulid, object_type) DO NOTHING`,
-		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName+"#assignee", "TUPLE_OPERATION_WRITE", tupleID, now,
+		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName+"#assignee", "TUPLE_OPERATION_WRITE", tupleID, now,
 	)
 	if err != nil {
 		return err

@@ -113,7 +113,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
 		INSERT INTO tuple (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation, user_type, ulid, inserted_at)
 		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
 		ON CONFLICT (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation) DO NOTHING`,
-		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName, "assignee", "userset", tupleID, now,
+		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName, "assignee", "userset", tupleID, now,
 	)
 	if err != nil {
 		return err

@@ -132,7 +132,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
 		INSERT INTO changelog (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation, operation, ulid, inserted_at)
 		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
 		ON CONFLICT (store, ulid, object_type) DO NOTHING`,
-		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName, "assignee", 0, tupleID, now,
+		storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName, "assignee", 0, tupleID, now,
 	)
 	if err != nil {
 		return err
@@ -1928,3 +1928,37 @@ func (t *telemetryMetaStore) GetFirstSeenFromMetricMetadata(ctx context.Context,

 	return result, nil
 }
+
+func (t *telemetryMetaStore) FetchLastSeenInfoMulti(ctx context.Context, metricNames ...string) (map[string]int64, error) {
+	sb := sqlbuilder.Select(
+		"metric_name",
+		"max(unix_milli)",
+	).From(t.metricsDBName + "." + telemetrymetrics.TimeseriesV4TableName)
+	sb.Where(sb.In("metric_name", metricNames))
+	sb.GroupBy("metric_name")
+
+	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
+
+	t.logger.DebugContext(ctx, "fetching metric last seen timestamp", "query", query, "args", args)
+
+	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
+	if err != nil {
+		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to fetch metric last seen info")
+	}
+	defer rows.Close()
+
+	lastSeenInfo := make(map[string]int64)
+	for rows.Next() {
+		var metricName string
+		var unixMilli int64
+		if err := rows.Scan(&metricName, &unixMilli); err != nil {
+			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to scan last seen info result")
+		}
+		lastSeenInfo[metricName] = unixMilli
+	}
+	if err := rows.Err(); err != nil {
+		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "error iterating over last seen info rows")
+	}
+	return lastSeenInfo, nil
+}
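For reference, a standalone sketch of the statement FetchLastSeenInfoMulti builds, assuming the sqlbuilder package is huandu/go-sqlbuilder (as the BuildWithFlavor/ClickHouse usage suggests) and that the time-series table resolves to signoz_metrics.time_series_v4:

package main

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

func main() {
	// One GROUP BY query returns the newest sample timestamp per metric.
	sb := sqlbuilder.Select("metric_name", "max(unix_milli)").
		From("signoz_metrics.time_series_v4")
	sb.Where(sb.In("metric_name", "signoz_calls_total", "system.memory.usage"))
	sb.GroupBy("metric_name")

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(query)
	// SELECT metric_name, max(unix_milli) FROM signoz_metrics.time_series_v4
	// WHERE metric_name IN (?, ?) GROUP BY metric_name
	fmt.Println(args) // [signoz_calls_total system.memory.usage]
}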
@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
 			},
 		},
 		expected: qbtypes.Statement{
-			Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
+			Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
 			Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
 		},
 		expectedErr: nil,

@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
 			},
 		},
 		expected: qbtypes.Statement{
-			Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
+			Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
 			Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
 		},
 		expectedErr: nil,

@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
 			},
 		},
 		expected: qbtypes.Statement{
-			Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
+			Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
 			Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
 		},
 		expectedErr: nil,

@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
 			},
 		},
 		expected: qbtypes.Statement{
-			Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
+			Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
 			Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
 		},
 		expectedErr: nil,
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"log/slog"
+	"strings"

 	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/flagger"

@@ -539,6 +540,13 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
 	return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
 }

+func isMetricAggOrderByKey(key string, config qbtypes.MetricAggregation) bool {
+	spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
+	timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
+	timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
+	return key == spaceAggOrderBy || key == timeAggOrderBy || key == timeSpaceAggOrderBy
+}
+
 func (b *MetricQueryStatementBuilder) BuildFinalSelect(
 	cteFragments []string,
 	cteArgs [][]any,

@@ -546,73 +554,159 @@
 ) (*qbtypes.Statement, error) {
 	metricType := query.Aggregations[0].Type
 	spaceAgg := query.Aggregations[0].SpaceAggregation
+	finalCTE := "__spatial_aggregation_cte"
+	if metricType == metrictypes.HistogramType {
+		histogramCTE, histogramCTEArgs, err := b.buildHistogramCTE(query)
+		if err != nil {
+			return nil, err
+		}
+		cteFragments = append(cteFragments, histogramCTE)
+		cteArgs = append(cteArgs, histogramCTEArgs)
+		finalCTE = "__histogram_cte"
+	}
+
+	groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
+	hasGroupBy := len(groupByKeys) > 0
+
+	if hasGroupBy {
+		cteWithAvgColumn := b.buildCTEWithAvgColumn(query, finalCTE)
+		cteFragments = append(cteFragments, cteWithAvgColumn)
+
+		cteWithGroupRankColumn := b.buildCTEWithGroupRank(query)
+		cteFragments = append(cteFragments, cteWithGroupRankColumn)
+		finalCTE = "__with_group_rank_cte"
+	}
+
 	combined := querybuilder.CombineCTEs(cteFragments)

 	var args []any
 	for _, a := range cteArgs {
 		args = append(args, a...)
 	}

 	sb := sqlbuilder.NewSelectBuilder()
+	sb.Select("ts")
+	if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
+		sb.SelectMore("le")
+	}
+	sb.SelectMore(groupByKeys...)
+	sb.SelectMore("value")
+	sb.From(finalCTE)
+	if query.Having != nil && query.Having.Expression != "" {
+		rewriter := querybuilder.NewHavingExpressionRewriter()
+		rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
+		sb.Where(rewrittenExpr)
+	}

-	if metricType == metrictypes.HistogramType && spaceAgg.IsPercentile() {
-		quantile := query.Aggregations[0].SpaceAggregation.Percentile()
-		sb.Select("ts")
-		for _, g := range query.GroupBy {
-			sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
-		}
-		sb.SelectMore(fmt.Sprintf(
-			"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
-			quantile,
-		))
-		sb.From("__spatial_aggregation_cte")
-		sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
-		sb.GroupBy("ts")
-		if query.Having != nil && query.Having.Expression != "" {
-			rewriter := querybuilder.NewHavingExpressionRewriter()
-			rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
-			sb.Having(rewrittenExpr)
-		}
-	} else if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
-		sb.Select("ts")
-
-		for _, g := range query.GroupBy {
-			sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
-		}
-
-		aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
-		if err != nil {
-			return nil, err
-		}
-		sb.SelectMore(aggQuery)
-
-		sb.From("__spatial_aggregation_cte")
-
-		sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
-		sb.GroupBy("ts")
-
-		if query.Having != nil && query.Having.Expression != "" {
-			rewriter := querybuilder.NewHavingExpressionRewriter()
-			rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
-			sb.Having(rewrittenExpr)
-		}
-	} else {
-		// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
-		sb.Select("*")
-		sb.From("__spatial_aggregation_cte")
-		if query.Having != nil && query.Having.Expression != "" {
-			rewriter := querybuilder.NewHavingExpressionRewriter()
-			rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
-			sb.Where(rewrittenExpr)
-		}
+	if hasGroupBy {
+		sb.OrderBy("group_rank")
+		if query.Limit > 0 {
+			sb.Where(fmt.Sprintf("group_rank <= %d", query.Limit))
+		}
 	}
-
 	sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
-	sb.OrderBy("ts")
+	if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
+		sb.OrderBy("toFloat64(le)")
+	}
+	sb.OrderBy("ts ASC")

 	q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
 	return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
 }

+func (b *MetricQueryStatementBuilder) buildHistogramCTE(
+	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
+) (string, []any, error) {
+	spaceAgg := query.Aggregations[0].SpaceAggregation
+	histogramCTEQueryBuilder := sqlbuilder.NewSelectBuilder()
+	if spaceAgg.IsPercentile() {
+		histogramCTEQueryBuilder.Select("ts")
+		for _, g := range query.GroupBy {
+			histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
+		}
+		quantile := spaceAgg.Percentile()
+		histogramCTEQueryBuilder.SelectMore(fmt.Sprintf(
+			"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
+			quantile,
+		))
+		histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
+		histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
+		histogramCTEQueryBuilder.GroupBy("ts")
+	} else if spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
+		histogramCTEQueryBuilder.Select("ts")
+		for _, g := range query.GroupBy {
+			histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
+		}
+		aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
+		if err != nil {
+			return "", nil, err
+		}
+		histogramCTEQueryBuilder.SelectMore(aggQuery)
+		histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
+		histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
+		histogramCTEQueryBuilder.GroupBy("ts")
+	} else {
+		// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
+		histogramCTEQueryBuilder.Select("*")
+		histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
+	}
+	histogramQueryCTE, histogramQueryCTEArgs := histogramCTEQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
+	histogramCTE := fmt.Sprintf("__histogram_cte AS (%s)", histogramQueryCTE)
+
+	return histogramCTE, histogramQueryCTEArgs, nil
+}
+
+/*
+This receives a CTE (__spatial_aggregation_cte or __histogram_cte) that has columns ts, value, and a column each for all the group-by keys.
+It creates a CTE (__with_avg_cte) that adds a column avg_val holding the average value of the group the row belongs to.
+*/
+func (b *MetricQueryStatementBuilder) buildCTEWithAvgColumn(
+	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
+	latestCTE string,
+) string {
+	withAvgCTEBuilder := sqlbuilder.NewSelectBuilder()
+	withAvgCTEBuilder.Select("*")
+	groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
+	withAvgCTEBuilder.SelectMore(fmt.Sprintf("avgIf(value, isNaN(value) = 0) OVER (PARTITION BY %s) AS avg_val", strings.Join(groupByKeys, ",")))
+	withAvgCTEBuilder.From(latestCTE)
+	withAvgCTEQuery, _ := withAvgCTEBuilder.BuildWithFlavor(sqlbuilder.ClickHouse) // no args, so the second return value is ignored
+	withAvgCTE := fmt.Sprintf("__with_avg_cte AS (%s)", withAvgCTEQuery)
+	return withAvgCTE
+}
+
+/*
+This receives the __with_avg_cte that has columns ts, value, a column each for all the group-by keys, and avg_val holding the average value of the group the row belongs to.
+It creates a CTE (__with_group_rank_cte) that adds a column group_rank, ranking each group by the order-by keys (or by avg_val if there are none).
+*/
+func (b *MetricQueryStatementBuilder) buildCTEWithGroupRank(
+	query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
+) string {
+	withGroupByCTEBuilder := sqlbuilder.NewSelectBuilder()
+	withGroupByCTEBuilder.Select("*")
+
+	windowOrder := []string{}
+	orderedKeys := map[string]struct{}{} // this will be used to add the remaining keys as tie breakers in the end
+	if len(query.Order) > 0 {
+		for _, o := range query.Order {
+			key := o.Key.Name
+			if isMetricAggOrderByKey(key, query.Aggregations[0]) {
+				windowOrder = append(windowOrder, fmt.Sprintf("avg_val %s", o.Direction.StringValue()))
+			} else {
+				windowOrder = append(windowOrder, fmt.Sprintf("`%s` %s", key, o.Direction.StringValue()))
+				orderedKeys[fmt.Sprintf("`%s`", key)] = struct{}{}
+			}
+		}
+	} else {
+		windowOrder = append(windowOrder, "avg_val DESC")
+	}
+	groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
+	for _, gk := range groupByKeys { // keys that haven't been added via order-by keys are appended at the end as tie breakers
+		if _, ok := orderedKeys[gk]; !ok {
+			windowOrder = append(windowOrder, fmt.Sprintf("%s ASC", gk))
+		}
+	}
+	withGroupByCTEBuilder.SelectMore(fmt.Sprintf("dense_rank() OVER (ORDER BY %s) AS group_rank", strings.Join(windowOrder, ",")))
+
+	withGroupByCTEBuilder.From("__with_avg_cte")
+	withGroupRankCTEQuery, _ := withGroupByCTEBuilder.BuildWithFlavor(sqlbuilder.ClickHouse) // no args, so the second return value is ignored
+	withGroupRankCTE := fmt.Sprintf("__with_group_rank_cte AS (%s)", withGroupRankCTEQuery)
+	return withGroupRankCTE
+}
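Putting the three CTEs together, the limited and ordered statement comes out roughly as below (simplified by hand from the builders above; the aggregation CTE body is elided and `service.name` stands in for the group-by keys):

WITH __spatial_aggregation_cte AS (/* temporal/spatial aggregation */),
     __with_avg_cte AS (
         SELECT *, avgIf(value, isNaN(value) = 0) OVER (PARTITION BY `service.name`) AS avg_val
         FROM __spatial_aggregation_cte),
     __with_group_rank_cte AS (
         SELECT *, dense_rank() OVER (ORDER BY avg_val DESC, `service.name` ASC) AS group_rank
         FROM __with_avg_cte)
SELECT ts, `service.name`, value
FROM __with_group_rank_cte
WHERE group_rank <= 10
ORDER BY group_rank, `service.name`, ts ASC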
@@ -15,16 +15,17 @@ import (
 )

 func TestStatementBuilder(t *testing.T) {
-	cases := []struct {
-		name        string
-		requestType qbtypes.RequestType
-		query       qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
-		expected    qbtypes.Statement
-		expectedErr error
-	}{
+	type baseQuery struct {
+		name     string
+		query    qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
+		orderKey string
+		args     []any
+		cte      string
+	}
+
+	bases := []baseQuery{
 		{
-			name:        "test_cumulative_rate_sum",
-			requestType: qbtypes.RequestTypeTimeSeries,
+			name: "cumulative_rate_sum",
 			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
 				Signal:       telemetrytypes.SignalMetrics,
 				StepInterval: qbtypes.Step{Duration: 30 * time.Second},

@@ -40,24 +41,16 @@ func TestStatementBuilder(t *testing.T) {
 				Filter: &qbtypes.Filter{
 					Expression: "service.name = 'cartservice'",
 				},
 				Limit: 10,
 				GroupBy: []qbtypes.GroupByKey{
-					{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name: "service.name",
-						},
-					},
+					{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
 				},
 			},
-			expected: qbtypes.Statement{
-				Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
-				Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
-			},
-			expectedErr: nil,
+			orderKey: "service.name",
+			args:     []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
+			cte:      "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
 		},
 		{
-			name:        "test_cumulative_rate_sum_with_mat_column",
-			requestType: qbtypes.RequestTypeTimeSeries,
+			name: "cumulative_rate_sum_with_mat_column",
 			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
 				Signal:       telemetrytypes.SignalMetrics,
 				StepInterval: qbtypes.Step{Duration: 30 * time.Second},

@@ -73,24 +66,16 @@ func TestStatementBuilder(t *testing.T) {
 				Filter: &qbtypes.Filter{
 					Expression: "materialized.key.name REGEXP 'cartservice' OR service.name = 'cartservice'",
 				},
 				Limit: 10,
 				GroupBy: []qbtypes.GroupByKey{
-					{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name: "service.name",
-						},
-					},
+					{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
 				},
 			},
-			expected: qbtypes.Statement{
-				Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
-				Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
-			},
-			expectedErr: nil,
+			orderKey: "service.name",
+			args:     []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
+			cte:      "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
 		},
 		{
-			name:        "test_delta_rate_sum",
-			requestType: qbtypes.RequestTypeTimeSeries,
+			name: "delta_rate_sum",
 			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
 				Signal:       telemetrytypes.SignalMetrics,
 				StepInterval: qbtypes.Step{Duration: 30 * time.Second},

@@ -106,24 +91,16 @@ func TestStatementBuilder(t *testing.T) {
 				Filter: &qbtypes.Filter{
 					Expression: "service.name = 'cartservice'",
 				},
 				Limit: 10,
 				GroupBy: []qbtypes.GroupByKey{
-					{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name: "service.name",
-						},
-					},
+					{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
 				},
 			},
-			expected: qbtypes.Statement{
-				Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
-				Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
-			},
-			expectedErr: nil,
+			orderKey: "service.name",
+			args:     []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
+			cte:      "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`)",
 		},
 		{
-			name:        "test_histogram_percentile1",
-			requestType: qbtypes.RequestTypeTimeSeries,
+			name: "histogram_percentile1",
 			query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
 				Signal:       telemetrytypes.SignalMetrics,
 				StepInterval: qbtypes.Step{Duration: 30 * time.Second},

@@ -139,24 +116,38 @@ func TestStatementBuilder(t *testing.T) {
 				Filter: &qbtypes.Filter{
 					Expression: "service.name = 'cartservice'",
 				},
 				Limit: 10,
 				GroupBy: []qbtypes.GroupByKey{
-					{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name: "service.name",
-						},
-					},
+					{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
 				},
 			},
-			expected: qbtypes.Statement{
-				Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
|
||||
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
|
||||
},
|
||||
expectedErr: nil,
|
||||
orderKey: "service.name",
|
||||
args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
|
||||
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
|
||||
},
|
||||
{
|
||||
name: "test_gauge_avg_sum",
|
||||
requestType: qbtypes.RequestTypeTimeSeries,
|
||||
name: "histogram_percentile2",
|
||||
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "http_server_duration_bucket",
|
||||
Type: metrictypes.HistogramType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
|
||||
},
|
||||
},
|
||||
GroupBy: []qbtypes.GroupByKey{
|
||||
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
|
||||
},
|
||||
},
|
||||
orderKey: "service.name",
|
||||
args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
|
||||
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
|
||||
},
|
||||
{
|
||||
name: "gauge_avg_sum",
|
||||
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
|
||||
@@ -172,53 +163,83 @@ func TestStatementBuilder(t *testing.T) {
|
||||
Filter: &qbtypes.Filter{
|
||||
Expression: "host.name = 'big-data-node-1'",
|
||||
},
|
||||
Limit: 10,
|
||||
GroupBy: []qbtypes.GroupByKey{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "host.name",
|
||||
},
|
||||
},
|
||||
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "host.name"}},
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
|
||||
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
{
|
||||
name: "test_histogram_percentile2",
|
||||
requestType: qbtypes.RequestTypeTimeSeries,
|
||||
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||
Signal: telemetrytypes.SignalMetrics,
|
||||
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
|
||||
Aggregations: []qbtypes.MetricAggregation{
|
||||
{
|
||||
MetricName: "http_server_duration_bucket",
|
||||
Type: metrictypes.HistogramType,
|
||||
Temporality: metrictypes.Cumulative,
|
||||
TimeAggregation: metrictypes.TimeAggregationRate,
|
||||
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
|
||||
},
|
||||
},
|
||||
Limit: 10,
|
||||
GroupBy: []qbtypes.GroupByKey{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
|
||||
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
|
||||
},
|
||||
expectedErr: nil,
|
||||
orderKey: "host.name",
|
||||
args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
|
||||
cte: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`)",
|
||||
},
|
||||
}
|
||||
|
||||
type variant struct {
|
||||
name string
|
||||
limit int
|
||||
hasOrder bool
|
||||
}
|
||||
|
||||
variants := []variant{
|
||||
{"with_limits", 10, false},
|
||||
{"without_limits", 0, false},
|
||||
{"with_order_by", 0, true},
|
||||
{"with_order_by_and_limits", 10, true},
|
||||
}
|
||||
|
||||
sumMetricsFinalSelects := map[string]string{
|
||||
"with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
|
||||
"with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
|
||||
}
|
||||
|
||||
histogramMetricsFinalSelects := map[string]string{
|
||||
"with_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"without_limits": " SELECT * FROM __histogram_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
|
||||
"with_order_by": " SELECT * FROM __histogram_cte ORDER BY `service.name` asc, ts ASC",
|
||||
"with_order_by_and_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
|
||||
}
|
||||
|
||||
// expectedFinalSelects maps "base/variant" to the final SELECT portion after the CTE.
|
||||
// The full expected query is: base.cte + expectedFinalSelects[name]
|
||||
expectedFinalSelects := map[string]string{
|
||||
// cumulative_rate_sum
|
||||
"cumulative_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
|
||||
"cumulative_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
|
||||
"cumulative_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
|
||||
"cumulative_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
|
||||
|
||||
// cumulative_rate_sum_with_mat_column
|
||||
"cumulative_rate_sum_with_mat_column/with_limits": sumMetricsFinalSelects["with_limits"],
|
||||
"cumulative_rate_sum_with_mat_column/without_limits": sumMetricsFinalSelects["without_limits"],
|
||||
"cumulative_rate_sum_with_mat_column/with_order_by": sumMetricsFinalSelects["with_order_by"],
|
||||
"cumulative_rate_sum_with_mat_column/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
|
||||
|
||||
// delta_rate_sum
|
||||
"delta_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
|
||||
"delta_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
|
||||
"delta_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
|
||||
"delta_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
|
||||
|
||||
// histogram_percentile1
|
||||
"histogram_percentile1/with_limits": histogramMetricsFinalSelects["with_limits"],
|
||||
"histogram_percentile1/without_limits": histogramMetricsFinalSelects["without_limits"],
|
||||
"histogram_percentile1/with_order_by": histogramMetricsFinalSelects["with_order_by"],
|
||||
"histogram_percentile1/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],
|
||||
|
||||
// histogram_percentile2
|
||||
"histogram_percentile2/with_limits": histogramMetricsFinalSelects["with_limits"],
|
||||
"histogram_percentile2/without_limits": histogramMetricsFinalSelects["without_limits"],
|
||||
"histogram_percentile2/with_order_by": histogramMetricsFinalSelects["with_order_by"],
|
||||
"histogram_percentile2/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],
|
||||
|
||||
// gauge_avg_sum
|
||||
"gauge_avg_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
|
||||
"gauge_avg_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
|
||||
"gauge_avg_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name` asc, ts ASC",
|
||||
"gauge_avg_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY `host.name` asc LIMIT 10) ORDER BY `host.name` asc, ts ASC",
|
||||
}
|
||||
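// As an illustration of the assembly described above: for the
// "gauge_avg_sum/with_order_by" variant, the full expected query is b.cte
// (the gauge CTE) followed by
// " SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name` asc, ts ASC".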

fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
@@ -227,15 +248,13 @@ func TestStatementBuilder(t *testing.T) {
t.Fatalf("failed to load field keys: %v", err)
}
mockMetadataStore.KeysMap = keys
// NOTE: LoadFieldKeysFromJSON doesn't set Materialized field
// for keys, so we have to set it manually here for testing
if _, ok := mockMetadataStore.KeysMap["materialized.key.name"]; ok {
if len(mockMetadataStore.KeysMap["materialized.key.name"]) > 0 {
mockMetadataStore.KeysMap["materialized.key.name"][0].Materialized = true
}
}

flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
fl, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
if err != nil {
t.Fatalf("failed to create flagger: %v", err)
}
@@ -245,23 +264,30 @@ func TestStatementBuilder(t *testing.T) {
mockMetadataStore,
fm,
cb,
flagger,
fl,
)

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
for _, b := range bases {
for _, v := range variants {
name := b.name + "/" + v.name
t.Run(name, func(t *testing.T) {
q := b.query
q.Limit = v.limit
if v.hasOrder {
q.Order = []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: b.orderKey}},
Direction: qbtypes.OrderDirectionAsc,
},
}
}

q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
result, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, qbtypes.RequestTypeTimeSeries, q, nil)

if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
require.Equal(t, b.cte+expectedFinalSelects[name], result.Query)
require.Equal(t, b.args, result.Args)
})
}
}
}

@@ -1,13 +1,13 @@
package roletypes
package authtypes

import (
"context"
"encoding/json"
"regexp"
"time"

"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"github.com/uptrace/bun"
@@ -51,7 +51,7 @@ var (
)

var (
TypeableResourcesRoles = authtypes.MustNewTypeableMetaResources(authtypes.MustNewName("roles"))
TypeableResourcesRoles = MustNewTypeableMetaResources(MustNewName("roles"))
)

type StorableRole struct {
@@ -194,20 +194,20 @@ func (role *PatchableRole) UnmarshalJSON(data []byte) error {
return nil
}

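// GetAdditionTuples builds the OpenFGA tuple keys that grant the role named
// name the given relation on each object in additions.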
func GetAdditionTuples(name string, orgID valuer.UUID, relation authtypes.Relation, additions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
func GetAdditionTuples(name string, orgID valuer.UUID, relation Relation, additions []*Object) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)

for _, object := range additions {
typeable := authtypes.MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
typeable := MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
transactionTuples, err := typeable.Tuples(
authtypes.MustNewSubject(
authtypes.TypeableRole,
MustNewSubject(
TypeableRole,
name,
orgID,
&authtypes.RelationAssignee,
&RelationAssignee,
),
relation,
[]authtypes.Selector{object.Selector},
[]Selector{object.Selector},
orgID,
)
if err != nil {
@@ -220,20 +220,20 @@ func GetAdditionTuples(name string, orgID valuer.UUID, relation authtypes.Relati
return tuples, nil
}

func GetDeletionTuples(name string, orgID valuer.UUID, relation authtypes.Relation, deletions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
func GetDeletionTuples(name string, orgID valuer.UUID, relation Relation, deletions []*Object) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)

for _, object := range deletions {
typeable := authtypes.MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
typeable := MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
transactionTuples, err := typeable.Tuples(
authtypes.MustNewSubject(
authtypes.TypeableRole,
MustNewSubject(
TypeableRole,
name,
orgID,
&authtypes.RelationAssignee,
&RelationAssignee,
),
relation,
[]authtypes.Selector{object.Selector},
[]Selector{object.Selector},
orgID,
)
if err != nil {
@@ -254,3 +254,15 @@ func MustGetSigNozManagedRoleFromExistingRole(role types.Role) string {

return managedRole
}

type RoleStore interface {
Create(context.Context, *StorableRole) error
Get(context.Context, valuer.UUID, valuer.UUID) (*StorableRole, error)
GetByOrgIDAndName(context.Context, valuer.UUID, string) (*StorableRole, error)
List(context.Context, valuer.UUID) ([]*StorableRole, error)
ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*StorableRole, error)
ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*StorableRole, error)
Update(context.Context, valuer.UUID, *StorableRole) error
Delete(context.Context, valuer.UUID, valuer.UUID) error
RunInTx(context.Context, func(ctx context.Context) error) error
}
@@ -1,19 +0,0 @@
package roletypes

import (
"context"

"github.com/SigNoz/signoz/pkg/valuer"
)

type Store interface {
Create(context.Context, *StorableRole) error
Get(context.Context, valuer.UUID, valuer.UUID) (*StorableRole, error)
GetByOrgIDAndName(context.Context, valuer.UUID, string) (*StorableRole, error)
List(context.Context, valuer.UUID) ([]*StorableRole, error)
ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*StorableRole, error)
ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*StorableRole, error)
Update(context.Context, valuer.UUID, *StorableRole) error
Delete(context.Context, valuer.UUID, valuer.UUID) error
RunInTx(context.Context, func(ctx context.Context) error) error
}
@@ -9,7 +9,7 @@ import (

"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -102,10 +102,10 @@ func NewServiceAccountFromStorables(storableServiceAccount *StorableServiceAccou
}
}

func NewServiceAccountsFromRoles(storableServiceAccounts []*StorableServiceAccount, roles []*roletypes.Role, serviceAccountIDToRoleIDsMap map[string][]valuer.UUID) []*ServiceAccount {
func NewServiceAccountsFromRoles(storableServiceAccounts []*StorableServiceAccount, roles []*authtypes.Role, serviceAccountIDToRoleIDsMap map[string][]valuer.UUID) []*ServiceAccount {
serviceAccounts := make([]*ServiceAccount, 0, len(storableServiceAccounts))

roleIDToRole := make(map[string]*roletypes.Role, len(roles))
roleIDToRole := make(map[string]*authtypes.Role, len(roles))
for _, role := range roles {
roleIDToRole[role.ID.String()] = role
}

@@ -5,7 +5,7 @@ import (

"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -19,7 +19,7 @@ type StorableServiceAccountRole struct {
RoleID string `bun:"role_id"`
}

func NewStorableServiceAccountRoles(serviceAccountID valuer.UUID, roles []*roletypes.Role) []*StorableServiceAccountRole {
func NewStorableServiceAccountRoles(serviceAccountID valuer.UUID, roles []*authtypes.Role) []*StorableServiceAccountRole {
storableServiceAccountRoles := make([]*StorableServiceAccountRole, len(roles))
for idx, role := range roles {
storableServiceAccountRoles[idx] = &StorableServiceAccountRole{
@@ -38,7 +38,7 @@ func NewStorableServiceAccountRoles(serviceAccountID valuer.UUID, roles []*rolet
return storableServiceAccountRoles
}

func NewRolesFromStorableServiceAccountRoles(storable []*StorableServiceAccountRole, roles []*roletypes.Role) ([]string, error) {
func NewRolesFromStorableServiceAccountRoles(storable []*StorableServiceAccountRole, roles []*authtypes.Role) ([]string, error) {
roleIDToName := make(map[string]string, len(roles))
for _, role := range roles {
roleIDToName[role.ID.String()] = role.Name

@@ -45,6 +45,8 @@ type MetadataStore interface {

// GetFirstSeenFromMetricMetadata gets the first seen timestamp for a metric metadata lookup key.
GetFirstSeenFromMetricMetadata(ctx context.Context, lookupKeys []MetricMetadataLookupKey) (map[MetricMetadataLookupKey]int64, error)

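// FetchLastSeenInfoMulti gets the last seen timestamp for each of the given metric names.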
FetchLastSeenInfoMulti(ctx context.Context, metricNames ...string) (map[string]int64, error)
}

type MetricMetadataLookupKey struct {

@@ -342,3 +342,7 @@ func (m *MockMetadataStore) SetFirstSeenFromMetricMetadata(firstSeenMap map[tele
m.LookupKeysMap[key] = value
}
}

func (m *MockMetadataStore) FetchLastSeenInfoMulti(ctx context.Context, metricNames ...string) (map[string]int64, error) {
return make(map[string]int64), nil
}

@@ -58,6 +58,8 @@ def build_builder_query(
step_interval: int = DEFAULT_STEP_INTERVAL,
group_by: Optional[List[str]] = None,
filter_expression: Optional[str] = None,
order_by: Optional[List[Dict]] = None,
limit: Optional[int] = None,
functions: Optional[List[Dict]] = None,
disabled: bool = False,
) -> Dict:
@@ -93,6 +95,12 @@ def build_builder_query(
if filter_expression:
spec["filter"] = {"expression": filter_expression}

if order_by:
spec["order"] = order_by

if limit is not None:
spec["limit"] = limit

if functions:
spec["functions"] = functions


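# A minimal usage sketch for the new order_by/limit parameters. The leading
# positional arguments (query name, metric name, time and space aggregation)
# are assumed from the call sites elsewhere in this change, and the metric
# name here is hypothetical:
query = build_builder_query(
    "A",
    "test_rate_groupby_asc_lim3",
    "rate",
    "sum",
    temporality="cumulative",
    group_by=["endpoint"],
    order_by=[build_order_by("endpoint", "asc")],
    limit=3,
)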
@@ -1,3 +1,4 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List

@@ -7,6 +8,7 @@ from sqlalchemy import sql
from wiremock.resources.mappings import Mapping

from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
from fixtures.metrics import Metrics
from fixtures.types import Operation, SigNoz, TestContainerDocker


@@ -74,9 +76,37 @@ def test_public_dashboard_widget_query_range(
signoz: SigNoz,
create_user_admin: Operation,  # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

# Insert metric data so the widget query returns results instead of 404
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
metrics: List[Metrics] = [
Metrics(
metric_name="container.cpu.time",
labels={"service": "test-service"},
timestamp=now - timedelta(minutes=5),
value=100.0,
temporality="Cumulative",
),
Metrics(
metric_name="container.cpu.time",
labels={"service": "test-service"},
timestamp=now - timedelta(minutes=3),
value=200.0,
temporality="Cumulative",
),
Metrics(
metric_name="container.cpu.time",
labels={"service": "test-service"},
timestamp=now - timedelta(minutes=1),
value=300.0,
temporality="Cumulative",
),
]
insert_metrics(metrics)

dashboard_req = {
"title": "Test Widget Query Range Dashboard",
"description": "For testing widget query range",

@@ -10,12 +10,16 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
assert_minutely_bucket_values,
build_builder_query,
find_named_result,
index_series_by_label,
make_query_request,
)
from fixtures.utils import get_testdata_file_path

FILL_GAPS = "fillGaps"
FILL_ZERO = "fillZero"
HISTOGRAM_FILE = get_testdata_file_path("histogram_data_1h.jsonl")


def _build_format_options(fill_mode: str) -> Dict[str, Any]:
@@ -580,3 +584,39 @@ def test_metrics_fill_formula_with_group_by(
expected_by_ts=expectations[group],
context=f"metrics/{fill_mode}/F1/{group}",
)

def test_histogram_p90_returns_404_outside_data_window(
signoz: types.SigNoz,
create_user_admin: None,  # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
) -> None:

now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
metric_name = "test_p90_last_seen_bucket"

metrics = Metrics.load_from_file(
HISTOGRAM_FILE,
base_time=now - timedelta(minutes=90),
metric_name_override=metric_name,
)
insert_metrics(metrics)

token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p90",
)

end_ms = int(now.timestamp() * 1000)

start_2h = int((now - timedelta(hours=2)).timestamp() * 1000)
response = make_query_request(signoz, token, start_2h, end_ms, [query])
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"

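# The histogram file covers one hour of data starting at now-90m, so the last
# sample lands around now-30m; a 15-minute window therefore falls entirely
# outside the metric's last-seen range and should be rejected with a 404.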
start_15m = int((now - timedelta(minutes=15)).timestamp() * 1000)
response = make_query_request(signoz, token, start_15m, end_ms, [query])
assert response.status_code == HTTPStatus.NOT_FOUND

@@ -2,16 +2,20 @@
Look at the cumulative_counters_1h.jsonl file for the relevant data
"""

import logging
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List
from typing import Any, Callable, List, Optional, Union

import pytest

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -71,16 +75,200 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"


def _assert_endpoint_rate_values(endpoint_values: dict) -> None:
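# Expected rates below are per-second values: a counter growing by +N/min
# yields N/60 per second (e.g. +10/min -> 10/60 = 0.167, rounded).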
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"

# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"

# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"

# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"

# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"


@pytest.mark.parametrize(
"metric_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,  # this is equivalent to sum(metric_name)
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"only_limit",
None,  # this is equivalent to sum(metric_name)
3,
3,
["/products", "/health", "/checkout"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
[build_order_by("sum(test_rate_groupby_asc_metric_name)", "asc")],
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_asc_metric_name_lim3)", "asc")],
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
[build_order_by("sum(test_rate_groupby_desc_metric_name)", "desc")],
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_desc_metric_name_lim3)", "desc")],
3,
3,
["/products", "/health", "/checkout"],
),
],
)
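# Without an explicit order, series appear ordered by their aggregated value
# in descending order, which is why /products (the fastest-growing counter)
# comes first in the "no_order" and "only_limit" cases.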
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None,  # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
metric_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = "test_rate_groupby"
metric_name = f"test_rate_groupby_{metric_suffix}"

metrics = Metrics.load_from_file(
CUMULATIVE_COUNTERS_FILE,
@@ -97,6 +285,8 @@ def test_rate_group_by_endpoint(
"sum",
temporality="cumulative",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)

response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -105,10 +295,23 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")

# Should have 5 different endpoints
assert (
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"

endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]

if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)

# endpoint -> values
endpoint_values = {}
@@ -117,11 +320,6 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values

expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"

# at no point should the rate be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -129,103 +327,4 @@ def test_rate_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"

# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"

# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)

# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"

# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"

# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"

# spike values should be consecutive
spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"

# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"

# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"
_assert_endpoint_rate_values(endpoint_values)

@@ -5,7 +5,7 @@ Look at the multi_temporality_counters_1h.jsonl file for the relevant data
|
||||
import random
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, List
|
||||
from typing import Callable, List, Optional, Union
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -14,6 +14,7 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.metrics import Metrics
|
||||
from fixtures.querier import (
|
||||
build_builder_query,
|
||||
build_order_by,
|
||||
get_all_series,
|
||||
get_series_values,
|
||||
make_query_request,
|
||||
@@ -91,6 +92,198 @@ def test_with_steady_values_and_reset(
|
||||
), f"{time_aggregation} should not be negative: {v['value']}"
|
||||
|
||||
|
||||
def _assert_endpoint_group_values( # pylint: disable=too-many-arguments
|
||||
endpoint_values: dict,
|
||||
stable_health_value: float,
|
||||
stable_products_value: float,
|
||||
stable_checkout_value: float,
|
||||
spike_checkout_value: float,
|
||||
stable_orders_value: float,
|
||||
spike_users_value: float,
|
||||
time_aggregation: str,
|
||||
) -> None:
|
||||
# /health: 60 data points (t01-t60), steady +10/min
|
||||
if "/health" in endpoint_values:
|
||||
health_values = endpoint_values["/health"]
|
||||
assert (
|
||||
len(health_values) >= 58
|
||||
), f"Expected >= 58 values for /health, got {len(health_values)}"
|
||||
count_steady_health = sum(
|
||||
1 for v in health_values if v["value"] == stable_health_value
|
||||
)
|
||||
assert (
|
||||
count_steady_health >= 57
|
||||
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
|
||||
# all /health rates should be stable except possibly first/last due to boundaries
|
||||
for v in health_values[1:-1]:
|
||||
assert (
|
||||
v["value"] == stable_health_value
|
||||
), f"Expected /health rate {stable_health_value}, got {v['value']}"
|
||||
|
||||
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
|
||||
if "/products" in endpoint_values:
|
||||
products_values = endpoint_values["/products"]
|
||||
assert (
|
||||
len(products_values) >= 49
|
||||
), f"Expected >= 49 values for /products, got {len(products_values)}"
|
||||
count_steady_products = sum(
|
||||
1 for v in products_values if v["value"] == stable_products_value
|
||||
)
|
||||
# most values should be stable, some boundary values differ due to 10-min gap
|
||||
assert (
|
||||
count_steady_products >= 46
|
||||
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
|
||||
# check that non-stable values are due to gap averaging (should be lower)
|
||||
gap_boundary_values = [
|
||||
v["value"] for v in products_values if v["value"] != stable_products_value
|
||||
]
|
||||
for val in gap_boundary_values:
|
||||
assert (
|
||||
0 < val < stable_products_value
|
||||
), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"
|
||||
|
||||
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
|
||||
if "/checkout" in endpoint_values:
|
||||
checkout_values = endpoint_values["/checkout"]
|
||||
assert (
|
||||
len(checkout_values) >= 59
|
||||
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
|
||||
count_steady_checkout = sum(
|
||||
1 for v in checkout_values if v["value"] == stable_checkout_value
|
||||
)
|
||||
assert (
|
||||
count_steady_checkout >= 53
|
||||
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
|
||||
# check that spike values exist (traffic spike +50/min at t40-t44)
|
||||
count_spike_checkout = sum(
|
||||
1 for v in checkout_values if v["value"] == spike_checkout_value
|
||||
)
|
||||
assert (
|
||||
count_spike_checkout >= 4
|
||||
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
|
||||
# spike values should be consecutive
|
||||
spike_indices = [
|
||||
i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
|
||||
]
|
||||
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
|
||||
for i in range(1, len(spike_indices)):
|
||||
assert (
|
||||
spike_indices[i] == spike_indices[i - 1] + 1
|
||||
), f"Spike indices should be consecutive, got {spike_indices}"
|
||||
|
||||
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
|
||||
# reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
|
||||
if "/orders" in endpoint_values:
|
||||
orders_values = endpoint_values["/orders"]
|
||||
assert (
|
||||
len(orders_values) >= 58
|
||||
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
|
||||
count_steady_orders = sum(
|
||||
1 for v in orders_values if v["value"] == stable_orders_value
|
||||
)
|
||||
assert (
|
||||
count_steady_orders >= 55
|
||||
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
|
||||
# check for counter reset effects - there should be some non-standard values
|
||||
non_standard_orders = [
|
||||
v["value"] for v in orders_values if v["value"] != stable_orders_value
|
||||
]
|
||||
assert (
|
||||
len(non_standard_orders) >= 2
|
||||
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
|
||||
# post-reset value should be higher (new counter value / interval)
|
||||
high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
|
||||
assert (
|
||||
len(high_rate_orders) >= 1
|
||||
), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"
|
||||
|
||||
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
|
||||
if "/users" in endpoint_values:
|
||||
users_values = endpoint_values["/users"]
|
||||
assert (
|
||||
len(users_values) >= 54
|
||||
), f"Expected >= 54 values for /users, got {len(users_values)}"
|
||||
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
|
||||
# most values should be 0 (flat periods between increments)
|
||||
assert (
|
||||
count_zero_users >= 40
|
||||
), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
|
||||
# non-zero values should be stable increment rate
|
||||
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
|
||||
count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
|
||||
assert (
|
||||
count_increment_rate >= 8
|
||||
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
|
||||
|
||||
|
||||
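The counter-reset behaviour asserted above can be summarised in a small sketch. This is a hedged illustration of the usual reset rule, not SigNoz's actual implementation; the helper name and the 60-second step are assumptions.

# Hypothetical sketch of the reset rule the /orders assertions rely on: when a
# cumulative counter drops (150 -> 2), the delta is taken from zero instead of
# going negative, so the computed rate stays >= 0 and spikes right after a reset.
def per_second_rates(samples, step_seconds=60):
    rates = []
    for prev, curr in zip(samples, samples[1:]):
        delta = curr - prev
        if delta < 0:  # counter reset detected
            delta = curr  # assume the counter restarted from zero
        rates.append(delta / step_seconds)
    return rates

# 150 -> 2 is a reset: the rate dips and spikes but never goes negative
assert all(r >= 0 for r in per_second_rates([130, 140, 150, 2, 7]))
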
@pytest.mark.parametrize(
    "order_suffix,order_by_spec,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            5,
            ["/products", "/health", "/checkout", "/orders", "/users"],
        ),
        (
            "asc",
            ("endpoint", "asc"),
            None,
            5,
            ["/checkout", "/health", "/orders", "/products", "/users"],
        ),
        (
            "asc_lim3",
            ("endpoint", "asc"),
            3,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "desc",
            ("endpoint", "desc"),
            None,
            5,
            ["/users", "/products", "/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim3",
            ("endpoint", "desc"),
            3,
            3,
            ["/users", "/products", "/orders"],
        ),
        (
            "asc_metric_name",
            ("sum_metric", "asc"),
            None,
            5,
            ["/users", "/orders", "/checkout", "/health", "/products"],
        ),
        (
            "asc_metric_name_lim3",
            ("sum_metric", "asc"),
            3,
            3,
            ["/users", "/orders", "/checkout"],
        ),
        (
            "desc_metric_name",
            ("sum_metric", "desc"),
            None,
            5,
            ["/products", "/health", "/checkout", "/orders", "/users"],
        ),
        (
            "desc_metric_name_lim3",
            ("sum_metric", "desc"),
            3,
            3,
            ["/products", "/health", "/checkout"],
        ),
    ],
)
@pytest.mark.parametrize(
    "time_aggregation, stable_health_value, stable_products_value, stable_checkout_value, spike_checkout_value, stable_orders_value, spike_users_value",
    [
@@ -110,11 +303,24 @@ def test_group_by_endpoint(
    spike_checkout_value: float,
    stable_orders_value: float,
    spike_users_value: float,
    order_suffix: str,
    order_by_spec: Optional[tuple],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_{time_aggregation}_groupby"
    metric_name = f"test_{time_aggregation}_groupby_{order_suffix}"

    # Build order_by at runtime so metric name reflects actual time_aggregation
    order_by = None
    if order_by_spec is not None:
        key, direction = order_by_spec
        if key == "sum_metric":
            key = f"sum({metric_name})"
        order_by = [build_order_by(key, direction)]

    metrics = Metrics.load_from_file(
        MULTI_TEMPORALITY_FILE,
@@ -130,6 +336,8 @@ def test_group_by_endpoint(
        time_aggregation,
        "sum",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -137,10 +345,23 @@ def test_group_by_endpoint(

    data = response.json()
    all_series = get_all_series(data, "A")
    # Should have 5 different endpoints
    assert (
        len(all_series) == 5
    ), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    endpoint_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_endpoints, set):
        assert (
            set(endpoint_labels) == expected_endpoints
        ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
    else:
        assert endpoint_labels == expected_endpoints, (
            f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
        )

    # endpoint -> values
    endpoint_values = {}
@@ -149,11 +370,6 @@ def test_group_by_endpoint(
        values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
        endpoint_values[endpoint] = values

    expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
    assert (
        set(endpoint_values.keys()) == expected_endpoints
    ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"

    # the rate should never be negative
    for endpoint, values in endpoint_values.items():
        for v in values:
@@ -161,117 +377,16 @@ def test_group_by_endpoint(
                v["value"] >= 0
            ), f"Rate for {endpoint} should not be negative: {v['value']}"

    # /health: 60 data points (t01-t60), steady +10/min
    health_values = endpoint_values["/health"]
    assert (
        len(health_values) >= 58
    ), f"Expected >= 58 values for /health, got {len(health_values)}"
    count_steady_health = sum(
        1 for v in health_values if v["value"] == stable_health_value
    )
    assert (
        count_steady_health >= 57
    ), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
    # all /health rates should be stable except possibly first/last due to boundaries
    for v in health_values[1:-1]:
        assert (
            v["value"] == stable_health_value
        ), f"Expected /health rate {stable_health_value}, got {v['value']}"
    _assert_endpoint_group_values(
        endpoint_values,
        stable_health_value,
        stable_products_value,
        stable_checkout_value,
        spike_checkout_value,
        stable_orders_value,
        spike_users_value,
        time_aggregation,
    )

    # /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
    products_values = endpoint_values["/products"]
    assert (
        len(products_values) >= 49
    ), f"Expected >= 49 values for /products, got {len(products_values)}"
    count_steady_products = sum(
        1 for v in products_values if v["value"] == stable_products_value
    )

    # most values should be stable, some boundary values differ due to 10-min gap
    assert (
        count_steady_products >= 46
    ), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"

    # check that non-stable values are due to gap averaging (should be lower)
    gap_boundary_values = [
        v["value"] for v in products_values if v["value"] != stable_products_value
    ]
    for val in gap_boundary_values:
        assert (
            0 < val < stable_products_value
        ), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"

    # /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
    checkout_values = endpoint_values["/checkout"]
    assert (
        len(checkout_values) >= 59
    ), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
    count_steady_checkout = sum(
        1 for v in checkout_values if v["value"] == stable_checkout_value
    )
    assert (
        count_steady_checkout >= 53
    ), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
    # check that spike values exist (traffic spike +50/min at t40-t44)
    count_spike_checkout = sum(
        1 for v in checkout_values if v["value"] == spike_checkout_value
    )
    assert (
        count_spike_checkout >= 4
    ), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"

    # spike values should be consecutive
    spike_indices = [
        i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
    ]
    assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
    # consecutiveness
    for i in range(1, len(spike_indices)):
        assert (
            spike_indices[i] == spike_indices[i - 1] + 1
        ), f"Spike indices should be consecutive, got {spike_indices}"

    # /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
    # reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
    orders_values = endpoint_values["/orders"]
    assert (
        len(orders_values) >= 58
    ), f"Expected >= 58 values for /orders, got {len(orders_values)}"
    count_steady_orders = sum(
        1 for v in orders_values if v["value"] == stable_orders_value
    )
    assert (
        count_steady_orders >= 55
    ), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
    # check for counter reset effects - there should be some non-standard values
    non_standard_orders = [
        v["value"] for v in orders_values if v["value"] != stable_orders_value
    ]
    assert (
        len(non_standard_orders) >= 2
    ), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
    # post-reset value should be higher (new counter value / interval)
    high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
    assert (
        len(high_rate_orders) >= 1
    ), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"

    # /users: 56 data points (t05-t60), sparse +1 every 5 minutes
    users_values = endpoint_values["/users"]
    assert (
        len(users_values) >= 54
    ), f"Expected >= 54 values for /users, got {len(users_values)}"
    count_zero_users = sum(1 for v in users_values if v["value"] == 0)
    # most values should be 0 (flat periods between increments)
    assert (
        count_zero_users >= 40
    ), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
    # non-zero values should be the stable increment rate
    non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
    count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
    assert (
        count_increment_rate >= 8
    ), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"


@pytest.mark.parametrize(

@@ -4,7 +4,7 @@ Look at the histogram_data_1h.jsonl file for the relevant data

from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from typing import Callable, List, Optional, Union

import pytest

@@ -13,13 +13,16 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
)
from fixtures.utils import get_testdata_file_path


FILE = get_testdata_file_path("histogram_data_1h.jsonl")
FILE_WITH_MANY_GROUPS = get_testdata_file_path("histogram_data_1h_many_groups.jsonl")


@pytest.mark.parametrize(
@@ -521,4 +524,564 @@ def test_histogram_percentile_for_delta_service(
    assert len(result_values) == 60
    assert result_values[0]["value"] == zeroth_value
    assert result_values[1]["value"] == first_value
    assert result_values[-1]["value"] == last_value


def _assert_series_endpoint_labels(
    series: list,
    expected_endpoints: Union[set, List[str]],
    prefix: str,
) -> None:
    labels = [s.get("labels", [{}])[0].get("value", "unknown") for s in series]
    if isinstance(expected_endpoints, set):
        assert (
            set(labels) == expected_endpoints
        ), f"Expected {prefix} endpoints {expected_endpoints}, got {set(labels)}"
    else:
        assert labels == expected_endpoints, (
            f"Expected {prefix} endpoints in order {expected_endpoints}, got {labels}"
        )

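A quick, self-contained illustration of the helper's contract, using made-up series data: a set only checks membership, while a list also checks order.

# Hypothetical usage of _assert_series_endpoint_labels defined above.
demo_series = [{"labels": [{"value": "/b"}]}, {"labels": [{"value": "/a"}]}]
_assert_series_endpoint_labels(demo_series, {"/a", "/b"}, "demo")  # passes: membership only
# _assert_series_endpoint_labels(demo_series, ["/a", "/b"], "demo")  # would fail: order is /b, /a
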
@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "asc_lim2",
            [build_order_by("endpoint", "asc")],
            2,
            2,
            ["/checkout", "/health"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            3,
            ["/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim2",
            [build_order_by("endpoint", "desc")],
            2,
            2,
            ["/orders", "/health"],
        ),
        (
            "asc_metric_name",
            [build_order_by("count(test_histogram_count_groupby_asc_metric_name)", "asc")],
            None,
            3,
            ["/health", "/orders", "/checkout"],  ## health and orders have the same size, so they are then sorted by endpoint as a tiebreaker
        ),
        (
            "asc_metric_name_lim2",
            [build_order_by("count(test_histogram_count_groupby_asc_metric_name_lim2)", "asc")],
            2,
            2,
            ["/health", "/orders"],
        ),
        (
            "desc_metric_name",
            [build_order_by("count(test_histogram_count_groupby_desc_metric_name)", "desc")],
            None,
            3,
            ["/checkout", "/health", "/orders"],  ## health and orders have the same size, so they are then sorted by endpoint as a tiebreaker
        ),
        (
            "desc_metric_name_lim2",
            [build_order_by("count(test_histogram_count_groupby_desc_metric_name_lim2)", "desc")],
            2,
            2,
            ["/checkout", "/health"],
        ),
    ],
)
def test_histogram_count_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_histogram_count_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query_count = build_builder_query(
        "A",
        metric_name,
        "increase",
        "count",
        comparisonSpaceAggregationParam={"threshold": 1000, "operator": "<="},
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query_count])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    count_all_series = get_all_series(data, "A")

    assert (
        len(count_all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(count_all_series)}"

    _assert_series_endpoint_labels(count_all_series, expected_endpoints, "count")

    count_values = {}
    for series in count_all_series:
        endpoint = series.get("labels", [{}])[0].get("value", "unknown")
        count_values[endpoint] = sorted(
            series.get("values", []), key=lambda x: x["timestamp"]
        )

    for endpoint, values in count_values.items():
        for v in values:
            assert v["value"] >= 0, f"Count for {endpoint} should not be negative: {v['value']}"

    # /health (cumulative, service=api): 59 points, increase starts at 11/min → 69/min
    if "/health" in count_values:
        vals = count_values["/health"]
        assert vals[0]["value"] == 11, f"Expected /health count first=11, got {vals[0]['value']}"
        assert vals[-1]["value"] == 69, f"Expected /health count last=69, got {vals[-1]['value']}"

    # /orders (cumulative, service=api): same distribution as /health
    if "/orders" in count_values:
        vals = count_values["/orders"]
        assert vals[0]["value"] == 11, f"Expected /orders count first=11, got {vals[0]['value']}"
        assert vals[-1]["value"] == 69, f"Expected /orders count last=69, got {vals[-1]['value']}"

    # /checkout (delta, service=web): 60 points, zeroth=12345 (raw delta), then 11/min → 69/min; see the temporality sketch after this test
    if "/checkout" in count_values:
        vals = count_values["/checkout"]
        assert vals[0]["value"] == 12345, f"Expected /checkout count zeroth=12345, got {vals[0]['value']}"
        assert vals[1]["value"] == 11, f"Expected /checkout count first=11, got {vals[1]['value']}"
        assert vals[-1]["value"] == 69, f"Expected /checkout count last=69, got {vals[-1]['value']}"

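The zeroth=12345 expectation above follows from temporality: delta samples already are per-interval increases, while cumulative samples must be differenced. A minimal sketch of that distinction (the function is illustrative, not the querier's code):

def increases(samples, temporality):
    if temporality == "delta":
        return list(samples)  # each delta sample already is an increase
    # cumulative: difference consecutive samples
    return [b - a for a, b in zip(samples, samples[1:])]

assert increases([12345, 11, 12], "delta")[0] == 12345  # the raw first delta survives
assert increases([100, 111, 123], "cumulative") == [11, 12]
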
@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            4,
            ["/checkout", "/health", "/orders", "/coupon"],
        ),
        (
            "only_limit",
            None,
            1,
            1,
            ["/checkout"],  ## health and checkout have the same size, so endpoint is the tiebreaker, and only checkout makes the limit
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            4,
            ["/checkout", "/coupon", "/health", "/orders"],
        ),
        (
            "asc_lim2",
            [build_order_by("endpoint", "asc")],
            2,
            2,
            ["/checkout", "/coupon"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            4,
            ["/orders", "/health", "/coupon", "/checkout"],
        ),
        (
            "desc_lim2",
            [build_order_by("endpoint", "desc")],
            2,
            2,
            ["/orders", "/health"],
        ),
        (
            "asc_metric_name",
            [build_order_by("p75(test_histogram_p75_groupby_asc_metric_name)", "asc")],
            None,
            4,
            ["/coupon", "/orders", "/checkout", "/health"],  ## health and checkout have the same size, so they are then sorted by endpoint as a tiebreaker
        ),
        (
            "asc_metric_name_lim2",
            [build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim2)", "asc")],
            2,
            2,
            ["/coupon", "/orders"],
        ),
        (
            "asc_metric_name_lim3",
            [build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim3)", "asc")],
            3,
            3,
            ["/coupon", "/orders", "/checkout"],  ## health and checkout have the same size, so endpoint is the tiebreaker, and only checkout makes the limit
        ),
        (
            "desc_metric_name",
            [build_order_by("p75(test_histogram_p75_groupby_desc_metric_name)", "desc")],
            None,
            4,
            ["/checkout", "/health", "/orders", "/coupon"],
        ),
        (
            "desc_metric_name_lim2",
            [build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
            2,
            2,
            ["/checkout", "/health"],
        ),
        (
            "desc_metric_name_lim1",
            [build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim1)", "desc")],
            1,
            1,
            ["/checkout"],  ## health and checkout have the same size, so endpoint is the tiebreaker, and only checkout makes the limit
        ),
    ],
)
def test_histogram_percentile_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_histogram_p75_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        FILE_WITH_MANY_GROUPS,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query_p75 = build_builder_query(
        "A",
        metric_name,
        "doesnotreallymatter",
        "p75",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    p75_series = get_all_series(data, "A")

    assert (
        len(p75_series) == expected_count
    ), f"Expected {expected_count} p75 series, got {len(p75_series)}"

    _assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")

    p75_values = {}
    for series in p75_series:
        endpoint = series.get("labels", [{}])[0].get("value", "unknown")
        p75_values[endpoint] = sorted(
            series.get("values", []), key=lambda x: x["timestamp"]
        )

    for endpoint, values in p75_values.items():
        for v in values:
            assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"

    # /health (cumulative, service=api); the p75 derivation is sketched after this test
    if "/health" in p75_values:
        vals = p75_values["/health"]
        assert vals[0]["value"] == 6000, f"Expected /health p75 first=6000, got {vals[0]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /health p75 last=6000, got {vals[-1]['value']}"

    # /orders (cumulative, service=api)
    if "/orders" in p75_values:
        vals = p75_values["/orders"]
        assert vals[0]["value"] == 4500, f"Expected /orders p75 first=4500, got {vals[0]['value']}"
        assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=4500, got {vals[-1]['value']}"

    # /checkout (delta, service=web): 60 points
    if "/checkout" in p75_values:
        vals = p75_values["/checkout"]
        assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=6000, got {vals[0]['value']}"
        assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6000, got {vals[1]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=6000, got {vals[-1]['value']}"

    # /coupon (delta, service=web): 60 points
    if "/coupon" in p75_values:
        vals = p75_values["/coupon"]
        assert vals[0]["value"] == 1125, f"Expected /coupon p75 zeroth=1125, got {vals[0]['value']}"
        assert vals[1]["value"] == 1125, f"Expected /coupon p75 first=1125, got {vals[1]['value']}"
        assert vals[-1]["value"] == 1125, f"Expected /coupon p75 last=1125, got {vals[-1]['value']}"

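How a p75 of exactly 6000 can fall out of histogram buckets: linear interpolation inside the bucket that crosses the 75% rank. A hedged sketch with made-up bucket bounds and counts, not SigNoz's implementation:

def percentile_from_buckets(upper_bounds, counts, q):
    target = q * sum(counts)  # rank to reach
    cum, lower = 0.0, 0.0
    for bound, count in zip(upper_bounds, counts):
        if cum + count >= target:
            # interpolate linearly inside [lower, bound]
            return lower + (bound - lower) * (target - cum) / count
        cum, lower = cum + count, bound
    return upper_bounds[-1]

# 75% of 8 samples = rank 6, halfway through the 4000-8000 bucket -> 6000
assert percentile_from_buckets([1000, 4000, 8000], [2, 2, 4], 0.75) == 6000.0
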
@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints,expected_status_codes",
    [
        (
            "no_order",
            None,
            None,
            5,
            ["/checkout", "/health", "/orders", "/coupon", "/coupon"],  ## coupon has 200 and 500 status codes, so it appears twice (two-key ordering is sketched after this test)
            ["200", "200", "200", "200", "500"],
        ),
        (
            "only_limit",
            None,
            1,
            1,
            ["/checkout"],  ## health and checkout have the same size, so endpoint is the tiebreaker, and only checkout makes the limit
            ["200"],
        ),
        (
            "asc_endpoint",
            [build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/coupon", "/coupon", "/health", "/orders"],
            ["200", "200", "500", "200", "200"],
        ),
        (
            "asc_endpoint_status_code",
            [build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
            None,
            5,
            ["/checkout", "/coupon", "/coupon", "/health", "/orders"],
            ["200", "200", "500", "200", "200"],
        ),
        (
            "asc_status_code_endpoint",
            [build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/coupon", "/health", "/orders", "/coupon"],
            ["200", "200", "200", "200", "500"],
        ),
        (
            "asc_endpoint_limit_2",
            [build_order_by("endpoint", "asc")],
            2,
            2,
            ["/checkout", "/coupon"],
            ["200", "200"],
        ),
        (
            "asc_endpoint_status_code_limit_2",
            [build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
            2,
            2,
            ["/checkout", "/coupon"],
            ["200", "200"],
        ),
        (
            "asc_status_code_endpoint_limit_4",
            [build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
            4,
            4,
            ["/checkout", "/coupon", "/health", "/orders"],
            ["200", "200", "200", "200"],
        ),
        (
            "desc_endpoint",
            [build_order_by("endpoint", "desc")],
            None,
            5,
            ["/orders", "/health", "/coupon", "/coupon", "/checkout"],
            ["200", "200", "200", "500", "200"],
        ),
        (
            "desc_endpoint_status_code",
            [build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
            None,
            5,
            ["/orders", "/health", "/coupon", "/coupon", "/checkout"],
            ["200", "200", "500", "200", "200"],
        ),
        (
            "desc_status_code_endpoint",
            [build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
            None,
            5,
            ["/coupon", "/orders", "/health", "/coupon", "/checkout"],
            ["500", "200", "200", "200", "200"],
        ),
        (
            "desc_endpoint_limit3",
            [build_order_by("endpoint", "desc")],
            3,
            3,
            ["/orders", "/health", "/coupon"],
            ["200", "200", "200"],
        ),
        (
            "desc_endpoint_status_code_limit3",
            [build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
            3,
            3,
            ["/orders", "/health", "/coupon"],
            ["200", "200", "500"],
        ),
        (
            "desc_status_code_endpoint_limit2",
            [build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
            2,
            2,
            ["/coupon", "/orders"],
            ["500", "200"],
        ),
    ],
)
def test_histogram_percentile_group_by_endpoint_and_status_code(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
    expected_status_codes: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_histogram_p75_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        FILE_WITH_MANY_GROUPS,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query_p75 = build_builder_query(
        "A",
        metric_name,
        "doesnotreallymatter",
        "p75",
        group_by=["endpoint", "status_code"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    p75_series = get_all_series(data, "A")

    assert (
        len(p75_series) == expected_count
    ), f"Expected {expected_count} p75 series, got {len(p75_series)}"

    _assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")

    status_codes = [s.get("labels", [{}])[1].get("value", "unknown") for s in p75_series]
    assert status_codes == expected_status_codes, (
        f"Expected p75 status codes in order {expected_status_codes}, got {status_codes}"
    )

    p75_values = {}
    for series in p75_series:
        endpoint = series.get("labels", [{}])[0].get("value", "unknown")
        status_code = series.get("labels", [{}])[1].get("value", "unknown")
        p75_values[endpoint + status_code] = sorted(
            series.get("values", []), key=lambda x: x["timestamp"]
        )

    for key, values in p75_values.items():
        for v in values:
            assert v["value"] >= 0, f"p75 for {key} should not be negative: {v['value']}"

    # /health (cumulative, service=api)
    if "/health200" in p75_values:
        vals = p75_values["/health200"]
        assert vals[0]["value"] == 6000, f"Expected /health p75 first=6000, got {vals[0]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /health p75 last=6000, got {vals[-1]['value']}"

    # /orders (cumulative, service=api)
    if "/orders200" in p75_values:
        vals = p75_values["/orders200"]
        assert vals[0]["value"] == 4500, f"Expected /orders p75 first=4500, got {vals[0]['value']}"
        assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=4500, got {vals[-1]['value']}"

    # /checkout (delta, service=web): 60 points
    if "/checkout200" in p75_values:
        vals = p75_values["/checkout200"]
        assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=6000, got {vals[0]['value']}"
        assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6000, got {vals[1]['value']}"
        assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=6000, got {vals[-1]['value']}"

    # /coupon (delta, service=web): 60 points
    if "/coupon200" in p75_values:
        vals = p75_values["/coupon200"]
        assert vals[0]["value"] == 1250, f"Expected /coupon200 p75 zeroth=1250, got {vals[0]['value']}"
        assert vals[1]["value"] == 1250, f"Expected /coupon200 p75 first=1250, got {vals[1]['value']}"
        assert vals[-1]["value"] == 1250, f"Expected /coupon200 p75 last=1250, got {vals[-1]['value']}"

    if "/coupon500" in p75_values:
        vals = p75_values["/coupon500"]
        assert vals[0]["value"] == 750, f"Expected /coupon500 p75 zeroth=750, got {vals[0]['value']}"
        assert vals[1]["value"] == 750, f"Expected /coupon500 p75 first=750, got {vals[1]['value']}"
        assert vals[-1]["value"] == 750, f"Expected /coupon500 p75 last=750, got {vals[-1]['value']}"

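The two-key cases above all reduce to lexicographic sorting over the order_by keys; a small sketch of the expected semantics (an assumption about the querier, but verified by the cases themselves):

rows = [
    ("/coupon", "200"), ("/coupon", "500"), ("/checkout", "200"),
    ("/health", "200"), ("/orders", "200"),
]
# asc by status_code, then asc by endpoint as the tiebreaker
ordered = sorted(rows, key=lambda r: (r[1], r[0]))
assert ordered == [
    ("/checkout", "200"), ("/coupon", "200"), ("/health", "200"),
    ("/orders", "200"), ("/coupon", "500"),
]
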
@@ -1,6 +1,6 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from typing import Callable, List, Optional, Union

import pytest

@@ -9,6 +9,8 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
)
@@ -139,3 +141,137 @@ def test_for_multiple_aggregations(
    assert result_values[19]["value"] == twentieth_min_val
    assert result_values[20]["value"] == twenty_first_min_val
    assert result_values[30]["value"] == thirty_first_min_val


@pytest.mark.parametrize(
    "metric_suffix,order_by,limit,expected_count,expected_services",
    [
        (
            "no_order",
            None,  # default ordering: desc by avg of all metric values for a group
            None,
            3,
            ["lab", "web", "api"],  # sums: lab=42000, api=36000, web=34000; per-group averages: lab=700, web=680, api=600 (see the sketch after this test)
        ),
        (
            "only_limit",
            None,
            2,
            2,
            ["lab", "web"],  # top 2 by default desc average: lab=700, web=680
        ),
        (
            "asc",
            [build_order_by("service", "asc")],
            None,
            3,
            ["api", "lab", "web"],
        ),
        (
            "asc_lim2",
            [build_order_by("service", "asc")],
            2,
            2,
            ["api", "lab"],
        ),
        (
            "desc",
            [build_order_by("service", "desc")],
            None,
            3,
            ["web", "lab", "api"],
        ),
        (
            "desc_lim2",
            [build_order_by("service", "desc")],
            2,
            2,
            ["web", "lab"],
        ),
        (
            "asc_metric_name",
            [build_order_by("sum(test_gauge_groupby_asc_metric_name)", "asc")],
            None,
            3,
            ["api", "web", "lab"],
        ),
        (
            "asc_metric_name_lim2",
            [build_order_by("sum(test_gauge_groupby_asc_metric_name_lim2)", "asc")],
            2,
            2,
            ["api", "web"],
        ),
        (
            "desc_metric_name",
            [build_order_by("sum(test_gauge_groupby_desc_metric_name)", "desc")],
            None,
            3,
            ["lab", "web", "api"],
        ),
        (
            "desc_metric_name_lim2",
            [build_order_by("sum(test_gauge_groupby_desc_metric_name_lim2)", "desc")],
            2,
            2,
            ["lab", "web"],
        ),
    ],
)
def test_gauge_group_by_service(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    metric_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_services: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = f"test_gauge_groupby_{metric_suffix}"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "max",
        "sum",
        group_by=["service"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    all_series = get_all_series(data, "A")

    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    service_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_services, set):
        assert (
            set(service_labels) == expected_services
        ), f"Expected services {expected_services}, got {set(service_labels)}"
    else:
        assert service_labels == expected_services, (
            f"Expected services {expected_services}, got {service_labels}"
        )

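Arithmetic behind the "no_order" default (descending by per-group average). The sums come from the test comments; the per-service sample counts are inferred from those averages, so treat them as assumptions:

sums = {"lab": 42000, "api": 36000, "web": 34000}
counts = {"lab": 60, "api": 60, "web": 50}  # inferred sample counts
avgs = {svc: sums[svc] / counts[svc] for svc in sums}  # lab=700.0, api=600.0, web=680.0
assert sorted(avgs, key=avgs.get, reverse=True) == ["lab", "web", "api"]
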
@@ -5,13 +5,16 @@ Look at the delta_counters_1h.jsonl file for the relevant data
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List
from typing import Any, Callable, List, Optional, Union

import pytest

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    build_order_by,
    get_all_series,
    get_series_values,
    make_query_request,
@@ -69,16 +72,61 @@ def test_rate_with_steady_values_and_reset(
            assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"


@pytest.mark.parametrize(
    "order_suffix,order_by,limit,expected_count,expected_endpoints",
    [
        (
            "no_order",
            None,
            None,
            5,
            {"/products", "/health", "/checkout", "/orders", "/users"},
        ),
        (
            "asc",
            [build_order_by("endpoint", "asc")],
            None,
            5,
            ["/checkout", "/health", "/orders", "/products", "/users"],
        ),
        (
            "asc_lim3",
            [build_order_by("endpoint", "asc")],
            3,
            3,
            ["/checkout", "/health", "/orders"],
        ),
        (
            "desc",
            [build_order_by("endpoint", "desc")],
            None,
            5,
            ["/users", "/products", "/orders", "/health", "/checkout"],
        ),
        (
            "desc_lim3",
            [build_order_by("endpoint", "desc")],
            3,
            3,
            ["/users", "/products", "/orders"],
        ),
    ],
)
def test_rate_group_by_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    order_suffix: str,
    order_by: Optional[List],
    limit: Optional[int],
    expected_count: int,
    expected_endpoints: Union[set, List[str]],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = "test_rate_groupby"
    metric_name = f"test_rate_groupby_{order_suffix}"

    metrics = Metrics.load_from_file(
        DELTA_COUNTERS_FILE,
@@ -94,6 +142,8 @@ def test_rate_group_by_endpoint(
        "rate",
        "sum",
        group_by=["endpoint"],
        order_by=order_by,
        limit=limit,
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -102,10 +152,23 @@ def test_rate_group_by_endpoint(
    data = response.json()
    all_series = get_all_series(data, "A")

    # Should have 5 different endpoints
    assert (
        len(all_series) == 5
    ), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
    assert (
        len(all_series) == expected_count
    ), f"Expected {expected_count} series, got {len(all_series)}"

    endpoint_labels = [
        series.get("labels", [{}])[0].get("value", "unknown")
        for series in all_series
    ]

    if isinstance(expected_endpoints, set):
        assert (
            set(endpoint_labels) == expected_endpoints
        ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
    else:
        assert endpoint_labels == expected_endpoints, (
            f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
        )

    # endpoint -> values
    endpoint_values = {}
@@ -114,11 +177,6 @@ def test_rate_group_by_endpoint(
        values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
        endpoint_values[endpoint] = values

    expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
    assert (
        set(endpoint_values.keys()) == expected_endpoints
    ), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"

    # the rate should never be negative
    for endpoint, values in endpoint_values.items():
        for v in values:
|
||||
|
||||
# /health: 60 data points (t01-t60), steady +10/min
|
||||
# rate = 10/60 = 0.167
|
||||
health_values = endpoint_values["/health"]
|
||||
assert (
|
||||
len(health_values) == 60
|
||||
), f"Expected 60 values for /health, got {len(health_values)}"
|
||||
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
|
||||
assert (
|
||||
count_steady_health == 60
|
||||
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
|
||||
# all /health rates should be 0.167 except possibly first/last due to boundaries
|
||||
for v in health_values[1:-1]:
|
||||
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
|
||||
if "/health" in endpoint_values:
|
||||
health_values = endpoint_values["/health"]
|
||||
assert (
|
||||
len(health_values) == 60
|
||||
), f"Expected 60 values for /health, got {len(health_values)}"
|
||||
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
|
||||
assert (
|
||||
count_steady_health == 60
|
||||
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
|
||||
# all /health rates should be 0.167 except possibly first/last due to boundaries
|
||||
for v in health_values[1:-1]:
|
||||
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
|
||||
|
||||
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
|
||||
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
|
||||
products_values = endpoint_values["/products"]
|
||||
assert (
|
||||
len(products_values) == 51
|
||||
), f"Expected 51 values for /products, got {len(products_values)}"
|
||||
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
|
||||
|
||||
assert (
|
||||
count_steady_products == 51
|
||||
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
|
||||
if "/products" in endpoint_values:
|
||||
products_values = endpoint_values["/products"]
|
||||
assert (
|
||||
len(products_values) == 51
|
||||
), f"Expected 51 values for /products, got {len(products_values)}"
|
||||
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
|
||||
assert (
|
||||
count_steady_products == 51
|
||||
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
|
||||
|
||||
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
|
||||
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
|
||||
checkout_values = endpoint_values["/checkout"]
|
||||
assert (
|
||||
len(checkout_values) == 61
|
||||
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
|
||||
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
|
||||
assert (
|
||||
count_steady_checkout == 56
|
||||
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
|
||||
# check that spike values exist (traffic spike +50/min at t40-t44)
|
||||
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
|
||||
assert (
|
||||
count_spike_checkout == 5
|
||||
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
|
||||
|
||||
# spike values should be consecutive
|
||||
spike_indices = [
|
||||
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
|
||||
]
|
||||
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
|
||||
# consecutiveness
|
||||
for i in range(1, len(spike_indices)):
|
||||
if "/checkout" in endpoint_values:
|
||||
checkout_values = endpoint_values["/checkout"]
|
||||
assert (
|
||||
spike_indices[i] == spike_indices[i - 1] + 1
|
||||
), f"Spike indices should be consecutive, got {spike_indices}"
|
||||
len(checkout_values) == 61
|
||||
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
|
||||
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
|
||||
assert (
|
||||
count_steady_checkout == 56
|
||||
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
|
||||
# check that spike values exist (traffic spike +50/min at t40-t44)
|
||||
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
|
||||
assert (
|
||||
count_spike_checkout == 5
|
||||
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
|
||||
# spike values should be consecutive
|
||||
spike_indices = [
|
||||
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
|
||||
]
|
||||
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
|
||||
for i in range(1, len(spike_indices)):
|
||||
assert (
|
||||
spike_indices[i] == spike_indices[i - 1] + 1
|
||||
), f"Spike indices should be consecutive, got {spike_indices}"
|
||||
|
||||
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
|
||||
# rate = 5/60 = 0.0833
|
||||
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
|
||||
orders_values = endpoint_values["/orders"]
|
||||
assert (
|
||||
len(orders_values) == 60
|
||||
), f"Expected 59 values for /orders, got {len(orders_values)}"
|
||||
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
|
||||
assert (
|
||||
count_steady_orders == 58
|
||||
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
|
||||
# check for counter reset effects - there should be some non-standard values
|
||||
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
|
||||
assert (
|
||||
len(non_standard_orders) == 2
|
||||
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
|
||||
# post-reset value should be higher (new counter value / interval)
|
||||
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
|
||||
assert (
|
||||
len(high_rate_orders) == 1
|
||||
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
|
||||
if "/orders" in endpoint_values:
|
||||
orders_values = endpoint_values["/orders"]
|
||||
assert (
|
||||
len(orders_values) == 60
|
||||
), f"Expected 60 values for /orders, got {len(orders_values)}"
|
||||
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
|
||||
assert (
|
||||
count_steady_orders == 58
|
||||
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
|
||||
# check for counter reset effects - there should be some non-standard values
|
||||
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
|
||||
assert (
|
||||
len(non_standard_orders) == 2
|
||||
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
|
||||
# post-reset value should be higher (new counter value / interval)
|
||||
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
|
||||
assert (
|
||||
len(high_rate_orders) == 1
|
||||
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
|
||||
|
||||
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes (12 of them)
|
||||
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
|
||||
users_values = endpoint_values["/users"]
|
||||
assert (
|
||||
len(users_values) == 56
|
||||
), f"Expected 56 values for /users, got {len(users_values)}"
|
||||
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
|
||||
# most values should be 0 (flat periods between increments)
|
||||
assert (
|
||||
count_zero_users == 44
|
||||
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
|
||||
# non-zero values should be 0.0167 (1/60 increment rate)
|
||||
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
|
||||
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
|
||||
assert (
|
||||
count_increment_rate == 12
|
||||
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"
|
||||
if "/users" in endpoint_values:
|
||||
users_values = endpoint_values["/users"]
|
||||
assert (
|
||||
len(users_values) == 56
|
||||
), f"Expected 56 values for /users, got {len(users_values)}"
|
||||
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
|
||||
# most values should be 0 (flat periods between increments)
|
||||
assert (
|
||||
count_zero_users == 44
|
||||
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
|
||||
# non-zero values should be 0.0167 (1/60 increment rate)
|
||||
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
|
||||
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
|
||||
assert (
|
||||
count_increment_rate == 12
|
||||
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"
|
||||
|
||||
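The literal constants compared above (0.167, 0.333, 0.0167, 0.833, 0.0833) are the per-minute increments divided by 60 seconds, apparently rounded to three significant figures; a sketch of that arithmetic (the rounding rule is an assumption):

# per-second rate = increment per minute / 60, to three significant figures
for inc_per_min, expected in [(10, "0.167"), (20, "0.333"), (1, "0.0167"), (50, "0.833"), (5, "0.0833")]:
    assert f"{inc_per_min / 60:.3g}" == expected
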
2400 tests/integration/testdata/histogram_data_1h_many_groups.jsonl (vendored, new file; diff suppressed because it is too large)