Compare commits

2 Commits

Author | SHA1 | Message | Date
Piyush Singariya | 1ecde40a22 | fix: tests passed but unnecessary changes added | 2026-03-18 00:24:09 +05:30
Piyush Singariya | 7f33145f68 | feat: poc mock telemetry mock store | 2026-03-17 18:19:37 +05:30
81 changed files with 1323 additions and 2449 deletions

.github/CODEOWNERS (vendored, 4 changed lines)

@@ -105,10 +105,6 @@ go.mod @therealpandey
/pkg/modules/authdomain/ @vikrantgupta25
/pkg/modules/role/ @vikrantgupta25
# IdentN Owners
/pkg/identn/ @vikrantgupta25
/pkg/http/middleware/identn.go @vikrantgupta25
# Integration tests
/tests/integration/ @vikrantgupta25

@@ -102,3 +102,13 @@ jobs:
run: |
go run cmd/enterprise/*.go generate openapi
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in openapi spec. Run go run cmd/enterprise/*.go generate openapi locally and commit."; exit 1)
- name: node-install
uses: actions/setup-node@v5
with:
node-version: "22"
- name: install-frontend
run: cd frontend && yarn install
- name: generate-api-clients
run: |
cd frontend && yarn generate:api
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)

@@ -52,16 +52,16 @@ jobs:
with:
PRIMUS_REF: main
JS_SRC: frontend
languages:
md-languages:
if: |
github.event_name == 'merge_group' ||
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: self-checkout
- name: checkout
uses: actions/checkout@v4
- name: run
- name: validate md languages
run: bash frontend/scripts/validate-md-languages.sh
authz:
if: |
@@ -70,55 +70,44 @@ jobs:
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: self-checkout
- name: Checkout code
uses: actions/checkout@v5
- name: node-install
- name: Set up Node.js
uses: actions/setup-node@v5
with:
node-version: "22"
- name: deps-install
- name: Install frontend dependencies
working-directory: ./frontend
run: |
yarn install
- name: uv-install
- name: Install uv
uses: astral-sh/setup-uv@v5
- name: uv-deps
- name: Install Python dependencies
working-directory: ./tests/integration
run: |
uv sync
- name: setup-test
- name: Start test environment
run: |
make py-test-setup
- name: generate
- name: Generate permissions.type.ts
working-directory: ./frontend
run: |
yarn generate:permissions-type
- name: teardown-test
- name: Teardown test environment
if: always()
run: |
make py-test-teardown
- name: validate
- name: Check for changes
run: |
if ! git diff --exit-code frontend/src/hooks/useAuthZ/permissions.type.ts; then
echo "::error::frontend/src/hooks/useAuthZ/permissions.type.ts is out of date. Please run the generator locally and commit the changes: npm run generate:permissions-type (from the frontend directory)"
exit 1
fi
openapi:
if: |
github.event_name == 'merge_group' ||
(github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
(github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
runs-on: ubuntu-latest
steps:
- name: self-checkout
uses: actions/checkout@v4
- name: node-install
uses: actions/setup-node@v5
with:
node-version: "22"
- name: install-frontend
run: cd frontend && yarn install
- name: generate-api-clients
run: |
cd frontend && yarn generate:api
git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)

@@ -12,6 +12,14 @@
"editor.formatOnSave": true,
"editor.defaultFormatter": "golang.go"
},
"go.buildTags": "chdb",
"go.testFlags": ["-tags=chdb"],
"go.toolsEnvVars": {
"GOFLAGS": "-tags=chdb"
},
"gopls": {
"build.buildFlags": ["-tags=chdb"]
},
"[sql]": {
"editor.defaultFormatter": "adpyke.vscode-sql-formatter"
},
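
The chdb entries above only matter for Go files guarded by the matching build tag. A minimal sketch of such a file (the package name is hypothetical; only the build tag and the -tags=chdb flags come from the diff):

```go
//go:build chdb

// Package chdbstore is a hypothetical example file.
package chdbstore

// Declarations in this file are invisible to go build, go test,
// and gopls unless they run with -tags=chdb, which is exactly what
// the go.buildTags, go.testFlags, GOFLAGS, and gopls build.buildFlags
// settings above configure.
const Enabled = true
```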

@@ -308,9 +308,6 @@ user:
allow_self: true
# The duration within which a user can reset their password.
max_token_lifetime: 6h
invite:
# The duration within which a user can accept their invite.
max_token_lifetime: 48h
root:
# Whether to enable the root user. When enabled, a root user is provisioned
# on startup using the email and password below. The root user cannot be
@@ -324,19 +321,3 @@ user:
org:
name: default
id: 00000000-0000-0000-0000-000000000000
##################### IdentN #####################
identn:
tokenizer:
# toggle the identN resolver
enabled: true
# headers to use for tokenizer identN resolver
headers:
- Authorization
- Sec-WebSocket-Protocol
apikey:
# toggle the identN resolver
enabled: true
# headers to use for apikey identN resolver
headers:
- SIGNOZ-API-KEY

@@ -190,7 +190,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.116.0
image: signoz/signoz:v0.115.0
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port

@@ -117,7 +117,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.116.0
image: signoz/signoz:v0.115.0
ports:
- "8080:8080" # signoz port
volumes:

@@ -181,7 +181,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.116.0}
image: signoz/signoz:${VERSION:-v0.115.0}
container_name: signoz
ports:
- "8080:8080" # signoz port

@@ -109,7 +109,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.116.0}
image: signoz/signoz:${VERSION:-v0.115.0}
container_name: signoz
ports:
- "8080:8080" # signoz port

@@ -220,13 +220,6 @@ components:
- additions
- deletions
type: object
AuthtypesPatchableRole:
properties:
description:
type: string
required:
- description
type: object
AuthtypesPostableAuthDomain:
properties:
config:
@@ -243,15 +236,6 @@ components:
password:
type: string
type: object
AuthtypesPostableRole:
properties:
description:
type: string
name:
type: string
required:
- name
type: object
AuthtypesPostableRotateToken:
properties:
refreshToken:
@@ -267,31 +251,6 @@ components:
- name
- type
type: object
AuthtypesRole:
properties:
createdAt:
format: date-time
type: string
description:
type: string
id:
type: string
name:
type: string
orgId:
type: string
type:
type: string
updatedAt:
format: date-time
type: string
required:
- id
- name
- description
- type
- orgId
type: object
AuthtypesRoleMapping:
properties:
defaultRole:
@@ -1763,6 +1722,47 @@ components:
- status
- error
type: object
RoletypesPatchableRole:
properties:
description:
type: string
required:
- description
type: object
RoletypesPostableRole:
properties:
description:
type: string
name:
type: string
required:
- name
type: object
RoletypesRole:
properties:
createdAt:
format: date-time
type: string
description:
type: string
id:
type: string
name:
type: string
orgId:
type: string
type:
type: string
updatedAt:
format: date-time
type: string
required:
- id
- name
- description
- type
- orgId
type: object
ServiceaccounttypesFactorAPIKey:
properties:
createdAt:
@@ -4234,7 +4234,7 @@ paths:
properties:
data:
items:
$ref: '#/components/schemas/AuthtypesRole'
$ref: '#/components/schemas/RoletypesRole'
type: array
status:
type: string
@@ -4277,7 +4277,7 @@ paths:
content:
application/json:
schema:
$ref: '#/components/schemas/AuthtypesPostableRole'
$ref: '#/components/schemas/RoletypesPostableRole'
responses:
"201":
content:
@@ -4422,7 +4422,7 @@ paths:
schema:
properties:
data:
$ref: '#/components/schemas/AuthtypesRole'
$ref: '#/components/schemas/RoletypesRole'
status:
type: string
required:
@@ -4470,7 +4470,7 @@ paths:
content:
application/json:
schema:
$ref: '#/components/schemas/AuthtypesPatchableRole'
$ref: '#/components/schemas/RoletypesPatchableRole'
responses:
"204":
content:
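
The Roletypes* schemas are verbatim moves of the old Authtypes* definitions, so the payload shape is unchanged. As a rough sketch, the Go structs these schemas would serialize from might look like this (field names are taken from the spec above; the struct layout itself is an assumption):

```go
package roletypes

import "time"

// PostableRole mirrors RoletypesPostableRole: name is required,
// description is optional.
type PostableRole struct {
	Name        string `json:"name"`
	Description string `json:"description,omitempty"`
}

// PatchableRole mirrors RoletypesPatchableRole: only the
// description can be patched.
type PatchableRole struct {
	Description string `json:"description"`
}

// Role mirrors RoletypesRole.
type Role struct {
	ID          string    `json:"id"`
	Name        string    `json:"name"`
	Description string    `json:"description"`
	Type        string    `json:"type"`
	OrgID       string    `json:"orgId"`
	CreatedAt   time.Time `json:"createdAt"`
	UpdatedAt   time.Time `json:"updatedAt"`
}
```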

@@ -13,6 +13,7 @@ import (
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
openfgapkgtransformer "github.com/openfga/language/pkg/go/transformer"
@@ -22,7 +23,7 @@ type provider struct {
pkgAuthzService authz.AuthZ
openfgaServer *openfgaserver.Server
licensing licensing.Licensing
store authtypes.RoleStore
store roletypes.Store
registry []authz.RegisterTypeable
}
@@ -81,23 +82,23 @@ func (provider *provider) Write(ctx context.Context, additions []*openfgav1.Tupl
return provider.openfgaServer.Write(ctx, additions, deletions)
}
func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*authtypes.Role, error) {
func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*roletypes.Role, error) {
return provider.pkgAuthzService.Get(ctx, orgID, id)
}
func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.Role, error) {
func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*roletypes.Role, error) {
return provider.pkgAuthzService.GetByOrgIDAndName(ctx, orgID, name)
}
func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.Role, error) {
func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.Role, error) {
return provider.pkgAuthzService.List(ctx, orgID)
}
func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.Role, error) {
func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*roletypes.Role, error) {
return provider.pkgAuthzService.ListByOrgIDAndNames(ctx, orgID, names)
}
func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.Role, error) {
func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*roletypes.Role, error) {
return provider.pkgAuthzService.ListByOrgIDAndIDs(ctx, orgID, ids)
}
@@ -113,7 +114,7 @@ func (provider *provider) Revoke(ctx context.Context, orgID valuer.UUID, names [
return provider.pkgAuthzService.Revoke(ctx, orgID, names, subject)
}
func (provider *provider) CreateManagedRoles(ctx context.Context, orgID valuer.UUID, managedRoles []*authtypes.Role) error {
func (provider *provider) CreateManagedRoles(ctx context.Context, orgID valuer.UUID, managedRoles []*roletypes.Role) error {
return provider.pkgAuthzService.CreateManagedRoles(ctx, orgID, managedRoles)
}
@@ -135,16 +136,16 @@ func (provider *provider) CreateManagedUserRoleTransactions(ctx context.Context,
return provider.Write(ctx, tuples, nil)
}
func (provider *provider) Create(ctx context.Context, orgID valuer.UUID, role *authtypes.Role) error {
func (provider *provider) Create(ctx context.Context, orgID valuer.UUID, role *roletypes.Role) error {
_, err := provider.licensing.GetActive(ctx, orgID)
if err != nil {
return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
}
return provider.store.Create(ctx, authtypes.NewStorableRoleFromRole(role))
return provider.store.Create(ctx, roletypes.NewStorableRoleFromRole(role))
}
func (provider *provider) GetOrCreate(ctx context.Context, orgID valuer.UUID, role *authtypes.Role) (*authtypes.Role, error) {
func (provider *provider) GetOrCreate(ctx context.Context, orgID valuer.UUID, role *roletypes.Role) (*roletypes.Role, error) {
_, err := provider.licensing.GetActive(ctx, orgID)
if err != nil {
return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
@@ -158,10 +159,10 @@ func (provider *provider) GetOrCreate(ctx context.Context, orgID valuer.UUID, ro
}
if existingRole != nil {
return authtypes.NewRoleFromStorableRole(existingRole), nil
return roletypes.NewRoleFromStorableRole(existingRole), nil
}
err = provider.store.Create(ctx, authtypes.NewStorableRoleFromRole(role))
err = provider.store.Create(ctx, roletypes.NewStorableRoleFromRole(role))
if err != nil {
return nil, err
}
@@ -216,13 +217,13 @@ func (provider *provider) GetObjects(ctx context.Context, orgID valuer.UUID, id
return objects, nil
}
func (provider *provider) Patch(ctx context.Context, orgID valuer.UUID, role *authtypes.Role) error {
func (provider *provider) Patch(ctx context.Context, orgID valuer.UUID, role *roletypes.Role) error {
_, err := provider.licensing.GetActive(ctx, orgID)
if err != nil {
return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
}
return provider.store.Update(ctx, orgID, authtypes.NewStorableRoleFromRole(role))
return provider.store.Update(ctx, orgID, roletypes.NewStorableRoleFromRole(role))
}
func (provider *provider) PatchObjects(ctx context.Context, orgID valuer.UUID, name string, relation authtypes.Relation, additions, deletions []*authtypes.Object) error {
@@ -231,12 +232,12 @@ func (provider *provider) PatchObjects(ctx context.Context, orgID valuer.UUID, n
return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
}
additionTuples, err := authtypes.GetAdditionTuples(name, orgID, relation, additions)
additionTuples, err := roletypes.GetAdditionTuples(name, orgID, relation, additions)
if err != nil {
return err
}
deletionTuples, err := authtypes.GetDeletionTuples(name, orgID, relation, deletions)
deletionTuples, err := roletypes.GetDeletionTuples(name, orgID, relation, deletions)
if err != nil {
return err
}
@@ -260,7 +261,7 @@ func (provider *provider) Delete(ctx context.Context, orgID valuer.UUID, id valu
return err
}
role := authtypes.NewRoleFromStorableRole(storableRole)
role := roletypes.NewRoleFromStorableRole(storableRole)
err = role.ErrIfManaged()
if err != nil {
return err
@@ -270,7 +271,7 @@ func (provider *provider) Delete(ctx context.Context, orgID valuer.UUID, id valu
}
func (provider *provider) MustGetTypeables() []authtypes.Typeable {
return []authtypes.Typeable{authtypes.TypeableRole, authtypes.TypeableResourcesRoles}
return []authtypes.Typeable{authtypes.TypeableRole, roletypes.TypeableResourcesRoles}
}
func (provider *provider) getManagedRoleGrantTuples(orgID valuer.UUID, userID valuer.UUID) ([]*openfgav1.TupleKey, error) {
@@ -282,7 +283,7 @@ func (provider *provider) getManagedRoleGrantTuples(orgID valuer.UUID, userID va
adminSubject,
authtypes.RelationAssignee,
[]authtypes.Selector{
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
},
orgID,
)
@@ -297,7 +298,7 @@ func (provider *provider) getManagedRoleGrantTuples(orgID valuer.UUID, userID va
anonymousSubject,
authtypes.RelationAssignee,
[]authtypes.Selector{
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAnonymousRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAnonymousRoleName),
},
orgID,
)
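
Most of this file is a mechanical rename; the substance is that role persistence round-trips through a pair of converters. A condensed sketch of that pattern, using the names visible above (the field sets are assumptions for illustration):

```go
// Role is the domain type handed to callers; StorableRole is the
// flat row the store persists.
type Role struct{ ID, Name, Description string }
type StorableRole struct{ ID, Name, Description string }

// NewStorableRoleFromRole is called before every store.Create/Update
// in the provider above.
func NewStorableRoleFromRole(role *Role) *StorableRole {
	return &StorableRole{ID: role.ID, Name: role.Name, Description: role.Description}
}

// NewRoleFromStorableRole is called on every read path.
func NewRoleFromStorableRole(s *StorableRole) *Role {
	return &Role{ID: s.ID, Name: s.Name, Description: s.Description}
}
```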

@@ -19,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -223,7 +224,7 @@ func (module *module) MustGetTypeables() []authtypes.Typeable {
func (module *module) MustGetManagedRoleTransactions() map[string][]*authtypes.Transaction {
return map[string][]*authtypes.Transaction{
authtypes.SigNozAnonymousRoleName: {
roletypes.SigNozAnonymousRoleName: {
{
ID: valuer.GenerateUUID(),
Relation: authtypes.RelationRead,

@@ -80,21 +80,6 @@ func TestManager_TestNotification_SendUnmatched_ThresholdRule(t *testing.T) {
alertDataRows := cmock.NewRows(cols, tc.Values)
mock := telemetryStore.Mock()
// Mock metadata queries for FetchTemporalityAndTypeMulti
// First query: fetchMetricsTemporalityAndType (from signoz_metrics time series table)
metadataCols := []cmock.ColumnType{
{Name: "metric_name", Type: "String"},
{Name: "temporality", Type: "String"},
{Name: "type", Type: "String"},
{Name: "is_monotonic", Type: "Bool"},
}
metadataRows := cmock.NewRows(metadataCols, [][]any{
{"probe_success", metrictypes.Unspecified, metrictypes.GaugeType, false},
})
mock.ExpectQuery("*distributed_time_series_v4*").WithArgs(nil, nil, nil).WillReturnRows(metadataRows)
// Second query: fetchMeterSourceMetricsTemporalityAndType (from signoz_meter table)
emptyMetadataRows := cmock.NewRows(metadataCols, [][]any{})
mock.ExpectQuery("*meter*").WithArgs(nil).WillReturnRows(emptyMetadataRows)
// Generate query arguments for the metric query
evalTime := time.Now().UTC()
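
For readers unfamiliar with the mock used here, the deleted lines follow the usual expect-and-return shape: declare column types, build canned rows, then register a query matcher. A stripped-down fragment of that pattern (not self-contained; mock and cmock come from the surrounding test file):

```go
// Column schema and canned rows for the stubbed metadata query.
cols := []cmock.ColumnType{
	{Name: "metric_name", Type: "String"},
	{Name: "temporality", Type: "String"},
}
rows := cmock.NewRows(cols, [][]any{{"probe_success", "Unspecified"}})

// Any query matching the pattern returns the canned rows.
mock.ExpectQuery("*distributed_time_series_v4*").
	WithArgs(nil, nil, nil).
	WillReturnRows(rows)
```

Their removal suggests the mock telemetry store introduced in 7f33145f68 now answers these metadata lookups itself, so tests no longer stub them one by one.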

@@ -223,8 +223,7 @@ SELECT
i.indisunique AS unique,
i.indisprimary AS primary,
a.attname AS column_name,
array_position(i.indkey, a.attnum) AS column_position,
pg_get_expr(i.indpred, i.indrelid) AS predicate
array_position(i.indkey, a.attnum) AS column_position
FROM
pg_index i
LEFT JOIN pg_class ct ON ct.oid = i.indrelid
@@ -247,12 +246,7 @@ ORDER BY index_name, column_position`, string(name))
}
}()
type indexEntry struct {
columns []sqlschema.ColumnName
predicate *string
}
uniqueIndicesMap := make(map[string]*indexEntry)
uniqueIndicesMap := make(map[string]*sqlschema.UniqueIndex)
for rows.Next() {
var (
tableName string
@@ -262,50 +256,30 @@ ORDER BY index_name, column_position`, string(name))
columnName string
// starts from 0 and is unused in this function, this is to ensure that the column names are in the correct order
columnPosition int
predicate *string
)
if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName, &columnPosition, &predicate); err != nil {
if err := rows.Scan(&tableName, &indexName, &unique, &primary, &columnName, &columnPosition); err != nil {
return nil, err
}
if unique {
if _, ok := uniqueIndicesMap[indexName]; !ok {
uniqueIndicesMap[indexName] = &indexEntry{
columns: []sqlschema.ColumnName{sqlschema.ColumnName(columnName)},
predicate: predicate,
uniqueIndicesMap[indexName] = &sqlschema.UniqueIndex{
TableName: name,
ColumnNames: []sqlschema.ColumnName{sqlschema.ColumnName(columnName)},
}
} else {
uniqueIndicesMap[indexName].columns = append(uniqueIndicesMap[indexName].columns, sqlschema.ColumnName(columnName))
uniqueIndicesMap[indexName].ColumnNames = append(uniqueIndicesMap[indexName].ColumnNames, sqlschema.ColumnName(columnName))
}
}
}
indices := make([]sqlschema.Index, 0)
for indexName, entry := range uniqueIndicesMap {
if entry.predicate != nil {
index := &sqlschema.PartialUniqueIndex{
TableName: name,
ColumnNames: entry.columns,
Where: *entry.predicate,
}
if index.Name() == indexName {
indices = append(indices, index)
} else {
indices = append(indices, index.Named(indexName))
}
for indexName, index := range uniqueIndicesMap {
if index.Name() == indexName {
indices = append(indices, index)
} else {
index := &sqlschema.UniqueIndex{
TableName: name,
ColumnNames: entry.columns,
}
if index.Name() == indexName {
indices = append(indices, index)
} else {
indices = append(indices, index.Named(indexName))
}
indices = append(indices, index.Named(indexName))
}
}

@@ -11,7 +11,6 @@
"prettify": "prettier --write .",
"fmt": "prettier --check .",
"lint": "eslint ./src",
"lint:generated": "eslint ./src/api/generated --fix",
"lint:fix": "eslint ./src --fix",
"jest": "jest",
"jest:coverage": "jest --coverage",
@@ -284,4 +283,4 @@
"tmp": "0.2.4",
"vite": "npm:rolldown-vite@7.3.1"
}
}
}

@@ -25,7 +25,7 @@ echo "\n✅ Prettier formatting successful"
# Fix linting issues
echo "\n\n---\nRunning eslint...\n"
if ! yarn lint:generated; then
if ! yarn lint --fix --quiet src/api/generated; then
echo "ESLint check failed! Please fix linting errors before proceeding."
exit 1
fi

@@ -21,8 +21,6 @@ import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
AuthtypesPatchableObjectsDTO,
AuthtypesPatchableRoleDTO,
AuthtypesPostableRoleDTO,
CreateRole201,
DeleteRolePathParameters,
GetObjects200,
@@ -33,6 +31,8 @@ import type {
PatchObjectsPathParameters,
PatchRolePathParameters,
RenderErrorResponseDTO,
RoletypesPatchableRoleDTO,
RoletypesPostableRoleDTO,
} from '../sigNoz.schemas';
/**
@@ -118,14 +118,14 @@ export const invalidateListRoles = async (
* @summary Create role
*/
export const createRole = (
authtypesPostableRoleDTO: BodyType<AuthtypesPostableRoleDTO>,
roletypesPostableRoleDTO: BodyType<RoletypesPostableRoleDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<CreateRole201>({
url: `/api/v1/roles`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: authtypesPostableRoleDTO,
data: roletypesPostableRoleDTO,
signal,
});
};
@@ -137,13 +137,13 @@ export const getCreateRoleMutationOptions = <
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof createRole>>,
TError,
{ data: BodyType<AuthtypesPostableRoleDTO> },
{ data: BodyType<RoletypesPostableRoleDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof createRole>>,
TError,
{ data: BodyType<AuthtypesPostableRoleDTO> },
{ data: BodyType<RoletypesPostableRoleDTO> },
TContext
> => {
const mutationKey = ['createRole'];
@@ -157,7 +157,7 @@ export const getCreateRoleMutationOptions = <
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof createRole>>,
{ data: BodyType<AuthtypesPostableRoleDTO> }
{ data: BodyType<RoletypesPostableRoleDTO> }
> = (props) => {
const { data } = props ?? {};
@@ -170,7 +170,7 @@ export const getCreateRoleMutationOptions = <
export type CreateRoleMutationResult = NonNullable<
Awaited<ReturnType<typeof createRole>>
>;
export type CreateRoleMutationBody = BodyType<AuthtypesPostableRoleDTO>;
export type CreateRoleMutationBody = BodyType<RoletypesPostableRoleDTO>;
export type CreateRoleMutationError = ErrorType<RenderErrorResponseDTO>;
/**
@@ -183,13 +183,13 @@ export const useCreateRole = <
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof createRole>>,
TError,
{ data: BodyType<AuthtypesPostableRoleDTO> },
{ data: BodyType<RoletypesPostableRoleDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof createRole>>,
TError,
{ data: BodyType<AuthtypesPostableRoleDTO> },
{ data: BodyType<RoletypesPostableRoleDTO> },
TContext
> => {
const mutationOptions = getCreateRoleMutationOptions(options);
@@ -370,13 +370,13 @@ export const invalidateGetRole = async (
*/
export const patchRole = (
{ id }: PatchRolePathParameters,
authtypesPatchableRoleDTO: BodyType<AuthtypesPatchableRoleDTO>,
roletypesPatchableRoleDTO: BodyType<RoletypesPatchableRoleDTO>,
) => {
return GeneratedAPIInstance<string>({
url: `/api/v1/roles/${id}`,
method: 'PATCH',
headers: { 'Content-Type': 'application/json' },
data: authtypesPatchableRoleDTO,
data: roletypesPatchableRoleDTO,
});
};
@@ -389,7 +389,7 @@ export const getPatchRoleMutationOptions = <
TError,
{
pathParams: PatchRolePathParameters;
data: BodyType<AuthtypesPatchableRoleDTO>;
data: BodyType<RoletypesPatchableRoleDTO>;
},
TContext
>;
@@ -398,7 +398,7 @@ export const getPatchRoleMutationOptions = <
TError,
{
pathParams: PatchRolePathParameters;
data: BodyType<AuthtypesPatchableRoleDTO>;
data: BodyType<RoletypesPatchableRoleDTO>;
},
TContext
> => {
@@ -415,7 +415,7 @@ export const getPatchRoleMutationOptions = <
Awaited<ReturnType<typeof patchRole>>,
{
pathParams: PatchRolePathParameters;
data: BodyType<AuthtypesPatchableRoleDTO>;
data: BodyType<RoletypesPatchableRoleDTO>;
}
> = (props) => {
const { pathParams, data } = props ?? {};
@@ -429,7 +429,7 @@ export const getPatchRoleMutationOptions = <
export type PatchRoleMutationResult = NonNullable<
Awaited<ReturnType<typeof patchRole>>
>;
export type PatchRoleMutationBody = BodyType<AuthtypesPatchableRoleDTO>;
export type PatchRoleMutationBody = BodyType<RoletypesPatchableRoleDTO>;
export type PatchRoleMutationError = ErrorType<RenderErrorResponseDTO>;
/**
@@ -444,7 +444,7 @@ export const usePatchRole = <
TError,
{
pathParams: PatchRolePathParameters;
data: BodyType<AuthtypesPatchableRoleDTO>;
data: BodyType<RoletypesPatchableRoleDTO>;
},
TContext
>;
@@ -453,7 +453,7 @@ export const usePatchRole = <
TError,
{
pathParams: PatchRolePathParameters;
data: BodyType<AuthtypesPatchableRoleDTO>;
data: BodyType<RoletypesPatchableRoleDTO>;
},
TContext
> => {

@@ -278,13 +278,6 @@ export interface AuthtypesPatchableObjectsDTO {
deletions: AuthtypesGettableObjectsDTO[] | null;
}
export interface AuthtypesPatchableRoleDTO {
/**
* @type string
*/
description: string;
}
export interface AuthtypesPostableAuthDomainDTO {
config?: AuthtypesAuthDomainConfigDTO;
/**
@@ -308,17 +301,6 @@ export interface AuthtypesPostableEmailPasswordSessionDTO {
password?: string;
}
export interface AuthtypesPostableRoleDTO {
/**
* @type string
*/
description?: string;
/**
* @type string
*/
name: string;
}
export interface AuthtypesPostableRotateTokenDTO {
/**
* @type string
@@ -337,39 +319,6 @@ export interface AuthtypesResourceDTO {
type: string;
}
export interface AuthtypesRoleDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
description: string;
/**
* @type string
*/
id: string;
/**
* @type string
*/
name: string;
/**
* @type string
*/
orgId: string;
/**
* @type string
*/
type: string;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
}
/**
* @nullable
*/
@@ -2090,6 +2039,57 @@ export interface RenderErrorResponseDTO {
status: string;
}
export interface RoletypesPatchableRoleDTO {
/**
* @type string
*/
description: string;
}
export interface RoletypesPostableRoleDTO {
/**
* @type string
*/
description?: string;
/**
* @type string
*/
name: string;
}
export interface RoletypesRoleDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
description: string;
/**
* @type string
*/
id: string;
/**
* @type string
*/
name: string;
/**
* @type string
*/
orgId: string;
/**
* @type string
*/
type: string;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
}
export interface ServiceaccounttypesFactorAPIKeyDTO {
/**
* @type string
@@ -3163,7 +3163,7 @@ export type ListRoles200 = {
/**
* @type array
*/
data: AuthtypesRoleDTO[];
data: RoletypesRoleDTO[];
/**
* @type string
*/
@@ -3185,7 +3185,7 @@ export type GetRolePathParameters = {
id: string;
};
export type GetRole200 = {
data: AuthtypesRoleDTO;
data: RoletypesRoleDTO;
/**
* @type string
*/

@@ -13,8 +13,8 @@ import {
usePatchRole,
} from 'api/generated/services/role';
import {
AuthtypesPostableRoleDTO,
RenderErrorResponseDTO,
RoletypesPostableRoleDTO,
} from 'api/generated/services/sigNoz.schemas';
import { ErrorType } from 'api/generatedAPIInstance';
import ROUTES from 'constants/routes';
@@ -114,7 +114,7 @@ function CreateRoleModal({
data: { description: values.description || '' },
});
} else {
const data: AuthtypesPostableRoleDTO = {
const data: RoletypesPostableRoleDTO = {
name: values.name,
...(values.description ? { description: values.description } : {}),
};

@@ -2,7 +2,7 @@ import { useCallback, useEffect, useMemo } from 'react';
import { useHistory } from 'react-router-dom';
import { Pagination, Skeleton } from 'antd';
import { useListRoles } from 'api/generated/services/role';
import { AuthtypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
import { RoletypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
import ErrorInPlace from 'components/ErrorInPlace/ErrorInPlace';
import { DATE_TIME_FORMATS } from 'constants/dateTimeFormats';
import ROUTES from 'constants/routes';
@@ -20,7 +20,7 @@ const PAGE_SIZE = 20;
type DisplayItem =
| { type: 'section'; label: string; count?: number }
| { type: 'role'; role: AuthtypesRoleDTO };
| { type: 'role'; role: RoletypesRoleDTO };
interface RolesListingTableProps {
searchQuery: string;
@@ -187,7 +187,7 @@ function RolesListingTable({
};
// todo: use table from periscope when its available for consumption
const renderRow = (role: AuthtypesRoleDTO): JSX.Element => (
const renderRow = (role: RoletypesRoleDTO): JSX.Element => (
<div
key={role.id}
className={`roles-table-row ${

@@ -1,8 +1,8 @@
import { AuthtypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
import { RoletypesRoleDTO } from 'api/generated/services/sigNoz.schemas';
const orgId = '019ba2bb-2fa1-7b24-8159-cfca08617ef9';
export const managedRoles: AuthtypesRoleDTO[] = [
export const managedRoles: RoletypesRoleDTO[] = [
{
id: '019c24aa-2248-756f-9833-984f1ab63819',
createdAt: new Date('2026-02-03T18:00:55.624356Z'),
@@ -35,7 +35,7 @@ export const managedRoles: AuthtypesRoleDTO[] = [
},
];
export const customRoles: AuthtypesRoleDTO[] = [
export const customRoles: RoletypesRoleDTO[] = [
{
id: '019c24aa-3333-0001-aaaa-111111111111',
createdAt: new Date('2026-02-10T10:30:00.000Z'),
@@ -56,7 +56,7 @@ export const customRoles: AuthtypesRoleDTO[] = [
},
];
export const allRoles: AuthtypesRoleDTO[] = [...managedRoles, ...customRoles];
export const allRoles: RoletypesRoleDTO[] = [...managedRoles, ...customRoles];
export const listRolesSuccessResponse = {
status: 'success',

@@ -1,8 +1,9 @@
import { sentryVitePlugin } from '@sentry/vite-plugin';
import react from '@vitejs/plugin-react';
import { readFileSync } from 'fs';
import { resolve } from 'path';
import { visualizer } from 'rollup-plugin-visualizer';
import type { Plugin, TransformResult, UserConfig } from 'vite';
import type { Plugin, UserConfig } from 'vite';
import { defineConfig, loadEnv } from 'vite';
import vitePluginChecker from 'vite-plugin-checker';
import viteCompression from 'vite-plugin-compression';
@@ -13,14 +14,15 @@ import tsconfigPaths from 'vite-tsconfig-paths';
function rawMarkdownPlugin(): Plugin {
return {
name: 'raw-markdown',
transform(code, id): TransformResult | undefined {
if (!id.endsWith('.md')) {
return undefined;
transform(_, id): any {
if (id.endsWith('.md')) {
const content = readFileSync(id, 'utf-8');
return {
code: `export default ${JSON.stringify(content)};`,
map: null,
};
}
return {
code: `export default ${JSON.stringify(code)};`,
map: null,
};
return undefined;
},
};
}
@@ -69,7 +71,7 @@ export default defineConfig(
);
}
if (mode === 'production') {
if (env.NODE_ENV === 'production') {
plugins.push(
ViteImageOptimizer({
jpeg: { quality: 80 },
@@ -100,25 +102,22 @@ export default defineConfig(
},
define: {
// TODO: Remove this in favor of import.meta.env
'process.env.NODE_ENV': JSON.stringify(mode),
'process.env.FRONTEND_API_ENDPOINT': JSON.stringify(
env.VITE_FRONTEND_API_ENDPOINT,
),
'process.env.WEBSOCKET_API_ENDPOINT': JSON.stringify(
env.VITE_WEBSOCKET_API_ENDPOINT,
),
'process.env.PYLON_APP_ID': JSON.stringify(env.VITE_PYLON_APP_ID),
'process.env.PYLON_IDENTITY_SECRET': JSON.stringify(
env.VITE_PYLON_IDENTITY_SECRET,
),
'process.env.APPCUES_APP_ID': JSON.stringify(env.VITE_APPCUES_APP_ID),
'process.env.POSTHOG_KEY': JSON.stringify(env.VITE_POSTHOG_KEY),
'process.env.SENTRY_ORG': JSON.stringify(env.VITE_SENTRY_ORG),
'process.env.SENTRY_PROJECT_ID': JSON.stringify(env.VITE_SENTRY_PROJECT_ID),
'process.env.SENTRY_DSN': JSON.stringify(env.VITE_SENTRY_DSN),
'process.env.TUNNEL_URL': JSON.stringify(env.VITE_TUNNEL_URL),
'process.env.TUNNEL_DOMAIN': JSON.stringify(env.VITE_TUNNEL_DOMAIN),
'process.env.DOCS_BASE_URL': JSON.stringify(env.VITE_DOCS_BASE_URL),
'process.env': JSON.stringify({
NODE_ENV: mode,
FRONTEND_API_ENDPOINT: env.VITE_FRONTEND_API_ENDPOINT,
WEBSOCKET_API_ENDPOINT: env.VITE_WEBSOCKET_API_ENDPOINT,
PYLON_APP_ID: env.VITE_PYLON_APP_ID,
PYLON_IDENTITY_SECRET: env.VITE_PYLON_IDENTITY_SECRET,
APPCUES_APP_ID: env.VITE_APPCUES_APP_ID,
POSTHOG_KEY: env.VITE_POSTHOG_KEY,
SENTRY_AUTH_TOKEN: env.VITE_SENTRY_AUTH_TOKEN,
SENTRY_ORG: env.VITE_SENTRY_ORG,
SENTRY_PROJECT_ID: env.VITE_SENTRY_PROJECT_ID,
SENTRY_DSN: env.VITE_SENTRY_DSN,
TUNNEL_URL: env.VITE_TUNNEL_URL,
TUNNEL_DOMAIN: env.VITE_TUNNEL_DOMAIN,
DOCS_BASE_URL: env.VITE_DOCS_BASE_URL,
}),
},
build: {
sourcemap: true,

go.mod (7 changed lines)

@@ -107,6 +107,8 @@ require (
github.com/aws/smithy-go v1.24.0 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/c-bata/go-prompt v0.2.6 // indirect
github.com/chdb-io/chdb-go v1.11.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
@@ -126,11 +128,16 @@ require (
github.com/goccy/go-yaml v1.19.2 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-tty v0.0.5 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/pkg/term v1.2.0-beta.2 // indirect
github.com/prometheus/client_golang/exp v0.0.0-20260108101519-fb0838f53562 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/swaggest/refl v1.4.0 // indirect
github.com/swaggest/usecase v1.3.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect

go.sum (21 changed lines)

@@ -206,6 +206,8 @@ github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7
github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/c-bata/go-prompt v0.2.6 h1:POP+nrHE+DfLYx370bedwNhsqmpCUynWPxuHi0C5vZI=
github.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY=
github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
@@ -216,6 +218,8 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chdb-io/chdb-go v1.11.0 h1:G6+Oy1onzNL3bSxncGfIdiB6beTpxwKztjfai7qLckE=
github.com/chdb-io/chdb-go v1.11.0/go.mod h1:RkT+xLXhdBKtUtJJPwhQQR4p6qiXHisJNS712QldDg8=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -760,6 +764,7 @@ github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxq
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
@@ -773,6 +778,14 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0=
github.com/mattn/go-tty v0.0.5 h1:s09uXI7yDbXzzTTfw3zonKFzwGkyYlgU3OMjqA0ddz4=
github.com/mattn/go-tty v0.0.5/go.mod h1:u5GGXBtZU6RQoKV8gY5W6UhMudbR5vXnUe7j3pxse28=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -900,6 +913,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw=
github.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -961,6 +976,9 @@ github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -1533,6 +1551,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1554,6 +1573,8 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -6,6 +6,7 @@ import (
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/gorilla/mux"
)
@@ -15,7 +16,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
Tags: []string{"role"},
Summary: "Create role",
Description: "This endpoint creates a role",
Request: new(authtypes.PostableRole),
Request: new(roletypes.PostableRole),
RequestContentType: "",
Response: new(types.Identifiable),
ResponseContentType: "application/json",
@@ -34,7 +35,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
Description: "This endpoint lists all roles",
Request: nil,
RequestContentType: "",
Response: make([]*authtypes.Role, 0),
Response: make([]*roletypes.Role, 0),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
@@ -51,7 +52,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
Description: "This endpoint gets a role",
Request: nil,
RequestContentType: "",
Response: new(authtypes.Role),
Response: new(roletypes.Role),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
@@ -83,7 +84,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
Tags: []string{"role"},
Summary: "Patch role",
Description: "This endpoint patches a role",
Request: new(authtypes.PatchableRole),
Request: new(roletypes.PatchableRole),
RequestContentType: "",
Response: nil,
ResponseContentType: "application/json",

@@ -6,6 +6,7 @@ import (
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -29,10 +30,10 @@ type AuthZ interface {
ListObjects(context.Context, string, authtypes.Relation, authtypes.Typeable) ([]*authtypes.Object, error)
// Creates the role.
Create(context.Context, valuer.UUID, *authtypes.Role) error
Create(context.Context, valuer.UUID, *roletypes.Role) error
// Gets the role if it exists or creates one.
GetOrCreate(context.Context, valuer.UUID, *authtypes.Role) (*authtypes.Role, error)
GetOrCreate(context.Context, valuer.UUID, *roletypes.Role) (*roletypes.Role, error)
// Gets the objects associated with the given role and relation.
GetObjects(context.Context, valuer.UUID, valuer.UUID, authtypes.Relation) ([]*authtypes.Object, error)
@@ -41,7 +42,7 @@ type AuthZ interface {
GetResources(context.Context) []*authtypes.Resource
// Patches the role.
Patch(context.Context, valuer.UUID, *authtypes.Role) error
Patch(context.Context, valuer.UUID, *roletypes.Role) error
// Patches the objects in authorization server associated with the given role and relation
PatchObjects(context.Context, valuer.UUID, string, authtypes.Relation, []*authtypes.Object, []*authtypes.Object) error
@@ -50,19 +51,19 @@ type AuthZ interface {
Delete(context.Context, valuer.UUID, valuer.UUID) error
// Gets the role
Get(context.Context, valuer.UUID, valuer.UUID) (*authtypes.Role, error)
Get(context.Context, valuer.UUID, valuer.UUID) (*roletypes.Role, error)
// Gets the role by org_id and name
GetByOrgIDAndName(context.Context, valuer.UUID, string) (*authtypes.Role, error)
GetByOrgIDAndName(context.Context, valuer.UUID, string) (*roletypes.Role, error)
// Lists all the roles for the organization.
List(context.Context, valuer.UUID) ([]*authtypes.Role, error)
List(context.Context, valuer.UUID) ([]*roletypes.Role, error)
// Lists all the roles for the organization filtered by name
ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*authtypes.Role, error)
ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*roletypes.Role, error)
// Lists all the roles for the organization filtered by ids
ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*authtypes.Role, error)
ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*roletypes.Role, error)
// Grants a role to the subject based on role name.
Grant(context.Context, valuer.UUID, []string, string) error
@@ -74,7 +75,7 @@ type AuthZ interface {
ModifyGrant(context.Context, valuer.UUID, []string, []string, string) error
// Bootstrap the managed roles.
CreateManagedRoles(context.Context, valuer.UUID, []*authtypes.Role) error
CreateManagedRoles(context.Context, valuer.UUID, []*roletypes.Role) error
// Bootstrap managed roles transactions and user assignments
CreateManagedUserRoleTransactions(context.Context, valuer.UUID, valuer.UUID) error
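
After this change the interface speaks roletypes for role payloads while authtypes keeps subjects, relations, objects, and typeables. A hedged sketch of a caller, using only signatures visible above (the Name field on Role is an assumption; error handling abbreviated):

```go
// Hypothetical helper: create a custom role, then grant it to a subject.
func createAndGrant(ctx context.Context, authz AuthZ, orgID valuer.UUID, role *roletypes.Role, subject string) error {
	if err := authz.Create(ctx, orgID, role); err != nil {
		return err
	}
	// Grant takes role names, not IDs.
	return authz.Grant(ctx, orgID, []string{role.Name}, subject)
}
```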

@@ -5,7 +5,7 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -14,11 +14,11 @@ type store struct {
sqlstore sqlstore.SQLStore
}
func NewSqlAuthzStore(sqlstore sqlstore.SQLStore) authtypes.RoleStore {
func NewSqlAuthzStore(sqlstore sqlstore.SQLStore) roletypes.Store {
return &store{sqlstore: sqlstore}
}
func (store *store) Create(ctx context.Context, role *authtypes.StorableRole) error {
func (store *store) Create(ctx context.Context, role *roletypes.StorableRole) error {
_, err := store.
sqlstore.
BunDBCtx(ctx).
@@ -32,8 +32,8 @@ func (store *store) Create(ctx context.Context, role *authtypes.StorableRole) er
return nil
}
func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*authtypes.StorableRole, error) {
role := new(authtypes.StorableRole)
func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*roletypes.StorableRole, error) {
role := new(roletypes.StorableRole)
err := store.
sqlstore.
BunDBCtx(ctx).
@@ -43,14 +43,14 @@ func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID)
Where("id = ?", id).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, authtypes.ErrCodeRoleNotFound, "role with id: %s doesn't exist", id)
return nil, store.sqlstore.WrapNotFoundErrf(err, roletypes.ErrCodeRoleNotFound, "role with id: %s doesn't exist", id)
}
return role, nil
}
func (store *store) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.StorableRole, error) {
role := new(authtypes.StorableRole)
func (store *store) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*roletypes.StorableRole, error) {
role := new(roletypes.StorableRole)
err := store.
sqlstore.
BunDBCtx(ctx).
@@ -60,14 +60,14 @@ func (store *store) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, na
Where("name = ?", name).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, authtypes.ErrCodeRoleNotFound, "role with name: %s doesn't exist", name)
return nil, store.sqlstore.WrapNotFoundErrf(err, roletypes.ErrCodeRoleNotFound, "role with name: %s doesn't exist", name)
}
return role, nil
}
func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.StorableRole, error) {
roles := make([]*authtypes.StorableRole, 0)
func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.StorableRole, error) {
roles := make([]*roletypes.StorableRole, 0)
err := store.
sqlstore.
BunDBCtx(ctx).
@@ -82,8 +82,8 @@ func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.S
return roles, nil
}
func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.StorableRole, error) {
roles := make([]*authtypes.StorableRole, 0)
func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*roletypes.StorableRole, error) {
roles := make([]*roletypes.StorableRole, 0)
err := store.
sqlstore.
BunDBCtx(ctx).
@@ -99,7 +99,7 @@ func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID,
if len(roles) != len(names) {
return nil, store.sqlstore.WrapNotFoundErrf(
nil,
authtypes.ErrCodeRoleNotFound,
roletypes.ErrCodeRoleNotFound,
"not all roles found for the provided names: %v", names,
)
}
@@ -107,8 +107,8 @@ func (store *store) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID,
return roles, nil
}
func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.StorableRole, error) {
roles := make([]*authtypes.StorableRole, 0)
func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*roletypes.StorableRole, error) {
roles := make([]*roletypes.StorableRole, 0)
err := store.
sqlstore.
BunDBCtx(ctx).
@@ -124,7 +124,7 @@ func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, id
if len(roles) != len(ids) {
return nil, store.sqlstore.WrapNotFoundErrf(
nil,
authtypes.ErrCodeRoleNotFound,
roletypes.ErrCodeRoleNotFound,
"not all roles found for the provided ids: %v", ids,
)
}
@@ -132,7 +132,7 @@ func (store *store) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, id
return roles, nil
}
func (store *store) Update(ctx context.Context, orgID valuer.UUID, role *authtypes.StorableRole) error {
func (store *store) Update(ctx context.Context, orgID valuer.UUID, role *roletypes.StorableRole) error {
_, err := store.
sqlstore.
BunDBCtx(ctx).
@@ -153,12 +153,12 @@ func (store *store) Delete(ctx context.Context, orgID valuer.UUID, id valuer.UUI
sqlstore.
BunDBCtx(ctx).
NewDelete().
Model(new(authtypes.StorableRole)).
Model(new(roletypes.StorableRole)).
Where("org_id = ?", orgID).
Where("id = ?", id).
Exec(ctx)
if err != nil {
return store.sqlstore.WrapNotFoundErrf(err, authtypes.ErrCodeRoleNotFound, "role with id %s doesn't exist", id)
return store.sqlstore.WrapNotFoundErrf(err, roletypes.ErrCodeRoleNotFound, "role with id %s doesn't exist", id)
}
return nil
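
From the method set this file implements, plus RunInTx (used by the provider later in the diff), the roletypes.Store interface it satisfies would look roughly like this. This is a reconstruction from the signatures above, not the actual declaration:

```go
type Store interface {
	Create(ctx context.Context, role *StorableRole) error
	Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*StorableRole, error)
	GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*StorableRole, error)
	List(ctx context.Context, orgID valuer.UUID) ([]*StorableRole, error)
	ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*StorableRole, error)
	ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*StorableRole, error)
	Update(ctx context.Context, orgID valuer.UUID, role *StorableRole) error
	Delete(ctx context.Context, orgID valuer.UUID, id valuer.UUID) error
	RunInTx(ctx context.Context, fn func(ctx context.Context) error) error
}
```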

@@ -8,6 +8,7 @@ import (
"github.com/SigNoz/signoz/pkg/authz/openfgaserver"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/SigNoz/signoz/pkg/factory"
@@ -18,7 +19,7 @@ import (
type provider struct {
server *openfgaserver.Server
store authtypes.RoleStore
store roletypes.Store
}
func NewProviderFactory(sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) factory.ProviderFactory[authz.AuthZ, authz.Config] {
@@ -67,61 +68,61 @@ func (provider *provider) ListObjects(ctx context.Context, subject string, relat
return provider.server.ListObjects(ctx, subject, relation, typeable)
}
func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*authtypes.Role, error) {
func (provider *provider) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*roletypes.Role, error) {
storableRole, err := provider.store.Get(ctx, orgID, id)
if err != nil {
return nil, err
}
return authtypes.NewRoleFromStorableRole(storableRole), nil
return roletypes.NewRoleFromStorableRole(storableRole), nil
}
func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*authtypes.Role, error) {
func (provider *provider) GetByOrgIDAndName(ctx context.Context, orgID valuer.UUID, name string) (*roletypes.Role, error) {
storableRole, err := provider.store.GetByOrgIDAndName(ctx, orgID, name)
if err != nil {
return nil, err
}
return authtypes.NewRoleFromStorableRole(storableRole), nil
return roletypes.NewRoleFromStorableRole(storableRole), nil
}
func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*authtypes.Role, error) {
func (provider *provider) List(ctx context.Context, orgID valuer.UUID) ([]*roletypes.Role, error) {
storableRoles, err := provider.store.List(ctx, orgID)
if err != nil {
return nil, err
}
roles := make([]*authtypes.Role, len(storableRoles))
roles := make([]*roletypes.Role, len(storableRoles))
for idx, storableRole := range storableRoles {
roles[idx] = authtypes.NewRoleFromStorableRole(storableRole)
roles[idx] = roletypes.NewRoleFromStorableRole(storableRole)
}
return roles, nil
}
func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*authtypes.Role, error) {
func (provider *provider) ListByOrgIDAndNames(ctx context.Context, orgID valuer.UUID, names []string) ([]*roletypes.Role, error) {
storableRoles, err := provider.store.ListByOrgIDAndNames(ctx, orgID, names)
if err != nil {
return nil, err
}
roles := make([]*authtypes.Role, len(storableRoles))
roles := make([]*roletypes.Role, len(storableRoles))
for idx, storable := range storableRoles {
roles[idx] = authtypes.NewRoleFromStorableRole(storable)
roles[idx] = roletypes.NewRoleFromStorableRole(storable)
}
return roles, nil
}
func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*authtypes.Role, error) {
func (provider *provider) ListByOrgIDAndIDs(ctx context.Context, orgID valuer.UUID, ids []valuer.UUID) ([]*roletypes.Role, error) {
storableRoles, err := provider.store.ListByOrgIDAndIDs(ctx, orgID, ids)
if err != nil {
return nil, err
}
roles := make([]*authtypes.Role, len(storableRoles))
roles := make([]*roletypes.Role, len(storableRoles))
for idx, storable := range storableRoles {
roles[idx] = authtypes.NewRoleFromStorableRole(storable)
roles[idx] = roletypes.NewRoleFromStorableRole(storable)
}
return roles, nil
@@ -178,10 +179,10 @@ func (provider *provider) Revoke(ctx context.Context, orgID valuer.UUID, names [
return provider.Write(ctx, nil, tuples)
}
func (provider *provider) CreateManagedRoles(ctx context.Context, _ valuer.UUID, managedRoles []*authtypes.Role) error {
func (provider *provider) CreateManagedRoles(ctx context.Context, _ valuer.UUID, managedRoles []*roletypes.Role) error {
err := provider.store.RunInTx(ctx, func(ctx context.Context) error {
for _, role := range managedRoles {
err := provider.store.Create(ctx, authtypes.NewStorableRoleFromRole(role))
err := provider.store.Create(ctx, roletypes.NewStorableRoleFromRole(role))
if err != nil {
return err
}
@@ -198,15 +199,15 @@ func (provider *provider) CreateManagedRoles(ctx context.Context, _ valuer.UUID,
}
func (provider *provider) CreateManagedUserRoleTransactions(ctx context.Context, orgID valuer.UUID, userID valuer.UUID) error {
return provider.Grant(ctx, orgID, []string{authtypes.SigNozAdminRoleName}, authtypes.MustNewSubject(authtypes.TypeableUser, userID.String(), orgID, nil))
return provider.Grant(ctx, orgID, []string{roletypes.SigNozAdminRoleName}, authtypes.MustNewSubject(authtypes.TypeableUser, userID.String(), orgID, nil))
}
func (setter *provider) Create(_ context.Context, _ valuer.UUID, _ *authtypes.Role) error {
return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
func (setter *provider) Create(_ context.Context, _ valuer.UUID, _ *roletypes.Role) error {
return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
}
func (provider *provider) GetOrCreate(_ context.Context, _ valuer.UUID, _ *authtypes.Role) (*authtypes.Role, error) {
return nil, errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
func (provider *provider) GetOrCreate(_ context.Context, _ valuer.UUID, _ *roletypes.Role) (*roletypes.Role, error) {
return nil, errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
}
func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource {
@@ -214,19 +215,19 @@ func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource
}
func (provider *provider) GetObjects(ctx context.Context, orgID valuer.UUID, id valuer.UUID, relation authtypes.Relation) ([]*authtypes.Object, error) {
return nil, errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
return nil, errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
}
func (provider *provider) Patch(_ context.Context, _ valuer.UUID, _ *authtypes.Role) error {
return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
func (provider *provider) Patch(_ context.Context, _ valuer.UUID, _ *roletypes.Role) error {
return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
}
func (provider *provider) PatchObjects(_ context.Context, _ valuer.UUID, _ string, _ authtypes.Relation, _, _ []*authtypes.Object) error {
return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
}
func (provider *provider) Delete(_ context.Context, _ valuer.UUID, _ valuer.UUID) error {
return errors.Newf(errors.TypeUnsupported, authtypes.ErrCodeRoleUnsupported, "not implemented")
return errors.Newf(errors.TypeUnsupported, roletypes.ErrCodeRoleUnsupported, "not implemented")
}
func (provider *provider) MustGetTypeables() []authtypes.Typeable {

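Note: for readers following the authtypes → roletypes move in this file, the list methods all rely on the same pure storable↔domain mapping. A minimal, self-contained sketch of that pattern follows; the field set is an assumption for illustration and does not mirror the real roletypes structs.

package main

import "fmt"

// Hypothetical, simplified stand-ins for roletypes.StorableRole and
// roletypes.Role; the real types carry more fields.
type StorableRole struct {
	ID, Name, Description, Type, OrgID string
}

type Role struct {
	ID, Name, Description, Type, OrgID string
}

// newRoleFromStorableRole mirrors the shape of roletypes.NewRoleFromStorableRole:
// a pure conversion, so the list methods can map rows 1:1 into domain objects.
func newRoleFromStorableRole(s *StorableRole) *Role {
	return &Role{ID: s.ID, Name: s.Name, Description: s.Description, Type: s.Type, OrgID: s.OrgID}
}

func main() {
	storable := []*StorableRole{{ID: "1", Name: "admin", Type: "managed"}}
	roles := make([]*Role, len(storable))
	for idx, s := range storable {
		roles[idx] = newRoleFromStorableRole(s)
	}
	fmt.Println(roles[0].Name) // admin
}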
View File

@@ -9,6 +9,7 @@ import (
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
)
@@ -29,13 +30,13 @@ func (handler *handler) Create(rw http.ResponseWriter, r *http.Request) {
return
}
req := new(authtypes.PostableRole)
req := new(roletypes.PostableRole)
if err := binding.JSON.BindBody(r.Body, req); err != nil {
render.Error(rw, err)
return
}
role := authtypes.NewRole(req.Name, req.Description, authtypes.RoleTypeCustom, valuer.MustNewUUID(claims.OrgID))
role := roletypes.NewRole(req.Name, req.Description, roletypes.RoleTypeCustom, valuer.MustNewUUID(claims.OrgID))
err = handler.authz.Create(ctx, valuer.MustNewUUID(claims.OrgID), role)
if err != nil {
render.Error(rw, err)
@@ -55,7 +56,7 @@ func (handler *handler) Get(rw http.ResponseWriter, r *http.Request) {
id, ok := mux.Vars(r)["id"]
if !ok {
render.Error(rw, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
render.Error(rw, errors.New(errors.TypeInvalidInput, roletypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
return
}
roleID, err := valuer.NewUUID(id)
@@ -83,7 +84,7 @@ func (handler *handler) GetObjects(rw http.ResponseWriter, r *http.Request) {
id, ok := mux.Vars(r)["id"]
if !ok {
render.Error(rw, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
render.Error(rw, errors.New(errors.TypeInvalidInput, roletypes.ErrCodeRoleInvalidInput, "id is missing from the request"))
return
}
roleID, err := valuer.NewUUID(id)
@@ -94,7 +95,7 @@ func (handler *handler) GetObjects(rw http.ResponseWriter, r *http.Request) {
relationStr, ok := mux.Vars(r)["relation"]
if !ok {
render.Error(rw, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeRoleInvalidInput, "relation is missing from the request"))
render.Error(rw, errors.New(errors.TypeInvalidInput, roletypes.ErrCodeRoleInvalidInput, "relation is missing from the request"))
return
}
relation, err := authtypes.NewRelation(relationStr)
@@ -149,7 +150,7 @@ func (handler *handler) Patch(rw http.ResponseWriter, r *http.Request) {
return
}
req := new(authtypes.PatchableRole)
req := new(roletypes.PatchableRole)
if err := binding.JSON.BindBody(r.Body, req); err != nil {
render.Error(rw, err)
return

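Note: the handler changes above keep the same bind → construct → delegate shape. A minimal sketch using only the standard library (the real code delegates to the binding and render packages) might look like this, with the PostableRole fields assumed for illustration.

package main

import (
	"encoding/json"
	"net/http"
)

// Assumed shape of roletypes.PostableRole for this sketch.
type PostableRole struct {
	Name        string `json:"name"`
	Description string `json:"description"`
}

func createRole(rw http.ResponseWriter, r *http.Request) {
	req := new(PostableRole)
	// The real handler delegates this step to binding.JSON.BindBody.
	if err := json.NewDecoder(r.Body).Decode(req); err != nil {
		http.Error(rw, err.Error(), http.StatusBadRequest)
		return
	}
	// ...construct the domain role and call authz.Create here...
	rw.WriteHeader(http.StatusCreated)
}

func main() {
	http.HandleFunc("/roles", createRole)
	_ = http.ListenAndServe(":8080", nil)
}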
View File

@@ -10,6 +10,7 @@ import (
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
)
@@ -55,9 +56,9 @@ func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
}
selectors := []authtypes.Selector{
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozEditorRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozViewerRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozEditorRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozViewerRoleName),
}
err = middleware.authzService.CheckWithTupleCreation(
@@ -107,8 +108,8 @@ func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
}
selectors := []authtypes.Selector{
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozEditorRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozEditorRoleName),
}
err = middleware.authzService.CheckWithTupleCreation(
@@ -158,7 +159,7 @@ func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
}
selectors := []authtypes.Selector{
authtypes.MustNewSelector(authtypes.TypeRole, authtypes.SigNozAdminRoleName),
authtypes.MustNewSelector(authtypes.TypeRole, roletypes.SigNozAdminRoleName),
}
err = middleware.authzService.CheckWithTupleCreation(

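Note: the three middleware hunks differ only in which managed roles each tier accepts — view takes all three, edit drops viewer, admin keeps only admin. A toy sketch of that tiering, with string stand-ins for the selector type and assumed role-name values:

package main

import "fmt"

// Assumed string values for the roletypes managed role names.
const (
	adminRole  = "signoz-admin"
	editorRole = "signoz-editor"
	viewerRole = "signoz-viewer"
)

// selectorsFor returns the role names a given access tier accepts, mirroring
// how ViewAccess, EditAccess and AdminAccess each build their selector slice.
func selectorsFor(tier string) []string {
	switch tier {
	case "view":
		return []string{adminRole, editorRole, viewerRole}
	case "edit":
		return []string{adminRole, editorRole}
	default: // admin
		return []string{adminRole}
	}
}

func main() {
	fmt.Println(selectorsFor("edit")) // [signoz-admin signoz-editor]
}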
View File

@@ -17,34 +17,28 @@ import (
// todo: will move this to the types layer with service account integration
type apiKeyTokenKey struct{}
type provider struct {
type resolver struct {
store sqlstore.SQLStore
config identn.Config
headers []string
settings factory.ScopedProviderSettings
sfGroup *singleflight.Group
}
func NewFactory(store sqlstore.SQLStore) factory.ProviderFactory[identn.IdentN, identn.Config] {
return factory.NewProviderFactory(factory.MustNewName(authtypes.IdentNProviderAPIkey.StringValue()), func(ctx context.Context, providerSettings factory.ProviderSettings, config identn.Config) (identn.IdentN, error) {
return New(providerSettings, store, config)
})
}
func New(providerSettings factory.ProviderSettings, store sqlstore.SQLStore, config identn.Config) (identn.IdentN, error) {
return &provider{
func New(providerSettings factory.ProviderSettings, store sqlstore.SQLStore, headers []string) identn.IdentN {
return &resolver{
store: store,
config: config,
headers: headers,
settings: factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/identn/apikeyidentn"),
sfGroup: &singleflight.Group{},
}, nil
}
}
func (provider *provider) Name() authtypes.IdentNProvider {
func (r *resolver) Name() authtypes.IdentNProvider {
return authtypes.IdentNProviderAPIkey
}
func (provider *provider) Test(req *http.Request) bool {
for _, header := range provider.config.APIKeyConfig.Headers {
func (r *resolver) Test(req *http.Request) bool {
for _, header := range r.headers {
if req.Header.Get(header) != "" {
return true
}
@@ -52,12 +46,8 @@ func (provider *provider) Test(req *http.Request) bool {
return false
}
func (provider *provider) Enabled() bool {
return provider.config.APIKeyConfig.Enabled
}
func (provider *provider) Pre(req *http.Request) *http.Request {
token := provider.extractToken(req)
func (r *resolver) Pre(req *http.Request) *http.Request {
token := r.extractToken(req)
if token == "" {
return req
}
@@ -66,16 +56,16 @@ func (provider *provider) Pre(req *http.Request) *http.Request {
return req.WithContext(ctx)
}
func (provider *provider) GetIdentity(req *http.Request) (*authtypes.Identity, error) {
func (r *resolver) GetIdentity(req *http.Request) (*authtypes.Identity, error) {
ctx := req.Context()
apiKeyToken, ok := ctx.Value(apiKeyTokenKey{}).(string)
if !ok || apiKeyToken == "" {
return nil, errors.New(errors.TypeUnauthenticated, errors.CodeUnauthenticated, "missing api key")
}
var apiKey types.StorableAPIKey
err := provider.
store.
err := r.store.
BunDB().
NewSelect().
Model(&apiKey).
@@ -90,8 +80,7 @@ func (provider *provider) GetIdentity(req *http.Request) (*authtypes.Identity, e
}
var user types.User
err = provider.
store.
err = r.store.
BunDB().
NewSelect().
Model(&user).
@@ -110,15 +99,14 @@ func (provider *provider) GetIdentity(req *http.Request) (*authtypes.Identity, e
return &identity, nil
}
func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes.Claims) {
func (r *resolver) Post(ctx context.Context, _ *http.Request, _ authtypes.Claims) {
apiKeyToken, ok := ctx.Value(apiKeyTokenKey{}).(string)
if !ok || apiKeyToken == "" {
return
}
_, _, _ = provider.sfGroup.Do(apiKeyToken, func() (any, error) {
_, err := provider.
store.
_, _, _ = r.sfGroup.Do(apiKeyToken, func() (any, error) {
_, err := r.store.
BunDB().
NewUpdate().
Model(new(types.StorableAPIKey)).
@@ -127,14 +115,14 @@ func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes
Where("revoked = false").
Exec(ctx)
if err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to update last used of api key", "error", err)
r.settings.Logger().ErrorContext(ctx, "failed to update last used of api key", "error", err)
}
return true, nil
})
}
func (provider *provider) extractToken(req *http.Request) string {
for _, header := range provider.config.APIKeyConfig.Headers {
func (r *resolver) extractToken(req *http.Request) string {
for _, header := range r.headers {
if v := req.Header.Get(header); v != "" {
return v
}

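Note: the resolver keeps the singleflight.Group from the old provider; its job is to collapse concurrent requests carrying the same API key into a single "update last used" write. A minimal, runnable sketch of that pattern, with a counter standing in for the database UPDATE:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var group singleflight.Group
	var writes atomic.Int64

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All goroutines share the key, so concurrent callers wait for
			// one execution and reuse its result instead of repeating it.
			_, _, _ = group.Do("api-key-token", func() (any, error) {
				writes.Add(1) // stands in for the UPDATE ... last_used write
				return true, nil
			})
		}()
	}
	wg.Wait()
	fmt.Println("writes:", writes.Load()) // typically 1, never 10
}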
View File

@@ -1,48 +0,0 @@
package identn
import (
"github.com/SigNoz/signoz/pkg/factory"
)
type Config struct {
// Config for tokenizer identN resolver
Tokenizer TokenizerConfig `mapstructure:"tokenizer"`
// Config for apikey identN resolver
APIKeyConfig APIKeyConfig `mapstructure:"apikey"`
}
type TokenizerConfig struct {
// Toggles the identN resolver
Enabled bool `mapstructure:"enabled"`
// Headers to extract from incoming requests
Headers []string `mapstructure:"headers"`
}
type APIKeyConfig struct {
// Toggles the identN resolver
Enabled bool `mapstructure:"enabled"`
// Headers to extract from incoming requests
Headers []string `mapstructure:"headers"`
}
func NewConfigFactory() factory.ConfigFactory {
return factory.NewConfigFactory(factory.MustNewName("identn"), newConfig)
}
func newConfig() factory.Config {
return &Config{
Tokenizer: TokenizerConfig{
Enabled: true,
Headers: []string{"Authorization", "Sec-WebSocket-Protocol"},
},
APIKeyConfig: APIKeyConfig{
Enabled: true,
Headers: []string{"SIGNOZ-API-KEY"},
},
}
}
func (c Config) Validate() error {
return nil
}

View File

@@ -23,8 +23,6 @@ type IdentN interface {
GetIdentity(r *http.Request) (*authtypes.Identity, error)
Name() authtypes.IdentNProvider
Enabled() bool
}
// IdentNWithPreHook is optionally implemented by resolvers that need to

View File

@@ -12,16 +12,8 @@ type identNResolver struct {
}
func NewIdentNResolver(providerSettings factory.ProviderSettings, identNs ...IdentN) IdentNResolver {
enabledIdentNs := []IdentN{}
for _, identN := range identNs {
if identN.Enabled() {
enabledIdentNs = append(enabledIdentNs, identN)
}
}
return &identNResolver{
identNs: enabledIdentNs,
identNs: identNs,
settings: factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/identn"),
}
}

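Note: with the Enabled() filter gone, NewIdentNResolver takes the resolvers as-is, and selection presumably falls to Test() alone. A sketch of that selection loop, with a trimmed-down stand-in interface (the real identn.IdentN also has GetIdentity, Name and the optional pre/post hooks):

package main

import (
	"fmt"
	"net/http"
)

type identN interface {
	Test(req *http.Request) bool
}

type headerIdentN struct{ header string }

func (h headerIdentN) Test(req *http.Request) bool { return req.Header.Get(h.header) != "" }

// firstMatch mirrors the assumed dispatch: the first resolver whose Test
// passes handles the request.
func firstMatch(identNs []identN, req *http.Request) identN {
	for _, i := range identNs {
		if i.Test(req) {
			return i
		}
	}
	return nil
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "/", nil)
	req.Header.Set("SIGNOZ-API-KEY", "secret")
	resolvers := []identN{headerIdentN{"Authorization"}, headerIdentN{"SIGNOZ-API-KEY"}}
	fmt.Println(firstMatch(resolvers, req)) // {SIGNOZ-API-KEY}
}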
View File

@@ -13,34 +13,28 @@ import (
"golang.org/x/sync/singleflight"
)
type provider struct {
type resolver struct {
tokenizer tokenizer.Tokenizer
config identn.Config
headers []string
settings factory.ScopedProviderSettings
sfGroup *singleflight.Group
}
func NewFactory(tokenizer tokenizer.Tokenizer) factory.ProviderFactory[identn.IdentN, identn.Config] {
return factory.NewProviderFactory(factory.MustNewName(authtypes.IdentNProviderTokenizer.StringValue()), func(ctx context.Context, providerSettings factory.ProviderSettings, config identn.Config) (identn.IdentN, error) {
return New(providerSettings, tokenizer, config)
})
}
func New(providerSettings factory.ProviderSettings, tokenizer tokenizer.Tokenizer, config identn.Config) (identn.IdentN, error) {
return &provider{
func New(providerSettings factory.ProviderSettings, tokenizer tokenizer.Tokenizer, headers []string) identn.IdentN {
return &resolver{
tokenizer: tokenizer,
config: config,
headers: headers,
settings: factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"),
sfGroup: &singleflight.Group{},
}, nil
}
}
func (provider *provider) Name() authtypes.IdentNProvider {
func (r *resolver) Name() authtypes.IdentNProvider {
return authtypes.IdentNProviderTokenizer
}
func (provider *provider) Test(req *http.Request) bool {
for _, header := range provider.config.Tokenizer.Headers {
func (r *resolver) Test(req *http.Request) bool {
for _, header := range r.headers {
if req.Header.Get(header) != "" {
return true
}
@@ -48,12 +42,8 @@ func (provider *provider) Test(req *http.Request) bool {
return false
}
func (provider *provider) Enabled() bool {
return provider.config.Tokenizer.Enabled
}
func (provider *provider) Pre(req *http.Request) *http.Request {
accessToken := provider.extractToken(req)
func (r *resolver) Pre(req *http.Request) *http.Request {
accessToken := r.extractToken(req)
if accessToken == "" {
return req
}
@@ -62,7 +52,7 @@ func (provider *provider) Pre(req *http.Request) *http.Request {
return req.WithContext(ctx)
}
func (provider *provider) GetIdentity(req *http.Request) (*authtypes.Identity, error) {
func (r *resolver) GetIdentity(req *http.Request) (*authtypes.Identity, error) {
ctx := req.Context()
accessToken, err := authtypes.AccessTokenFromContext(ctx)
@@ -70,45 +60,41 @@ func (provider *provider) GetIdentity(req *http.Request) (*authtypes.Identity, e
return nil, err
}
return provider.tokenizer.GetIdentity(ctx, accessToken)
return r.tokenizer.GetIdentity(ctx, accessToken)
}
func (provider *provider) Post(ctx context.Context, _ *http.Request, _ authtypes.Claims) {
if !provider.config.Tokenizer.Enabled {
return
}
func (r *resolver) Post(ctx context.Context, _ *http.Request, _ authtypes.Claims) {
accessToken, err := authtypes.AccessTokenFromContext(ctx)
if err != nil {
return
}
_, _, _ = provider.sfGroup.Do(accessToken, func() (any, error) {
if err := provider.tokenizer.SetLastObservedAt(ctx, accessToken, time.Now()); err != nil {
provider.settings.Logger().ErrorContext(ctx, "failed to set last observed at", "error", err)
_, _, _ = r.sfGroup.Do(accessToken, func() (any, error) {
if err := r.tokenizer.SetLastObservedAt(ctx, accessToken, time.Now()); err != nil {
r.settings.Logger().ErrorContext(ctx, "failed to set last observed at", "error", err)
return false, err
}
return true, nil
})
}
func (provider *provider) extractToken(req *http.Request) string {
func (r *resolver) extractToken(req *http.Request) string {
var value string
for _, header := range provider.config.Tokenizer.Headers {
for _, header := range r.headers {
if v := req.Header.Get(header); v != "" {
value = v
break
}
}
accessToken, ok := provider.parseBearerAuth(value)
accessToken, ok := r.parseBearerAuth(value)
if !ok {
return value
}
return accessToken
}
func (provider *provider) parseBearerAuth(auth string) (string, bool) {
func (r *resolver) parseBearerAuth(auth string) (string, bool) {
const prefix = "Bearer "
if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) {
return "", false

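Note: parseBearerAuth above is case-insensitive on the scheme, and extractToken falls back to the raw header value when no prefix is present. The diff cuts off before the success branch, so the final return here is reconstructed from that usage; a standalone copy shows the behavior:

package main

import (
	"fmt"
	"strings"
)

// parseBearerAuth strips a case-insensitive "Bearer " prefix and reports
// whether one was present; the success return is inferred from extractToken.
func parseBearerAuth(auth string) (string, bool) {
	const prefix = "Bearer "
	if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) {
		return "", false
	}
	return auth[len(prefix):], true
}

func main() {
	for _, header := range []string{"Bearer abc123", "bearer abc123", "abc123"} {
		token, ok := parseBearerAuth(header)
		fmt.Printf("%q -> token=%q ok=%v\n", header, token, ok)
	}
}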
View File

@@ -1,65 +0,0 @@
package cloudintegration
import (
"context"
"net/http"
citypes "github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Module interface {
CreateAccount(ctx context.Context, account *citypes.Account) error
// GetAccount returns cloud integration account
GetAccount(ctx context.Context, orgID, accountID valuer.UUID) (*citypes.Account, error)
// ListAccounts lists accounts where the agent is connected
ListAccounts(ctx context.Context, orgID valuer.UUID) ([]*citypes.Account, error)
// UpdateAccount updates the cloud integration account for a specific organization.
UpdateAccount(ctx context.Context, account *citypes.Account) error
// DisconnectAccount soft deletes/removes a cloud integration account.
DisconnectAccount(ctx context.Context, orgID, accountID valuer.UUID) error
// GetConnectionArtifact returns cloud provider specific connection information,
// the client side handles how this information is shown
GetConnectionArtifact(ctx context.Context, account *citypes.Account, req *citypes.ConnectionArtifactRequest) (*citypes.ConnectionArtifact, error)
// ListServicesMetadata returns the list of services metadata for a cloud provider attached with the integrationID.
// This just returns a summary of the service and not the whole service definition
ListServicesMetadata(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID) ([]*citypes.ServiceMetadata, error)
// GetService returns service definition details for a serviceID. This returns config and
// other details required to show in service details page on web client.
GetService(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID, serviceID string) (*citypes.Service, error)
// UpdateService updates cloud integration service
UpdateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService) error
// AgentCheckIn is called by the agent to heartbeat and get the latest config in response.
AgentCheckIn(ctx context.Context, orgID valuer.UUID, req *citypes.AgentCheckInRequest) (*citypes.AgentCheckInResponse, error)
// GetDashboardByID returns dashboard JSON for a given dashboard id.
// this only returns the dashboard when the service (embedded in dashboard id) is enabled
// in the org for any cloud integration account
GetDashboardByID(ctx context.Context, orgID valuer.UUID, id string) (*dashboardtypes.Dashboard, error)
// ListDashboards returns the list of dashboards across all connected cloud integration accounts
// for enabled services in the org. This list gets added to the dashboard list page
ListDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
}
type Handler interface {
GetConnectionArtifact(http.ResponseWriter, *http.Request)
ListAccounts(http.ResponseWriter, *http.Request)
GetAccount(http.ResponseWriter, *http.Request)
UpdateAccount(http.ResponseWriter, *http.Request)
DisconnectAccount(http.ResponseWriter, *http.Request)
ListServicesMetadata(http.ResponseWriter, *http.Request)
GetService(http.ResponseWriter, *http.Request)
UpdateService(http.ResponseWriter, *http.Request)
AgentCheckIn(http.ResponseWriter, *http.Request)
}

View File

@@ -1,134 +0,0 @@
package implcloudintegration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type store struct {
store sqlstore.SQLStore
}
func NewStore(sqlStore sqlstore.SQLStore) cloudintegrationtypes.Store {
return &store{store: sqlStore}
}
func (s *store) GetAccountByID(ctx context.Context, orgID, id valuer.UUID, provider cloudintegrationtypes.CloudProviderType) (*cloudintegrationtypes.StorableCloudIntegration, error) {
account := new(cloudintegrationtypes.StorableCloudIntegration)
err := s.store.BunDBCtx(ctx).NewSelect().Model(account).
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Scan(ctx)
if err != nil {
return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "cloud integration account with id %s not found", id)
}
return account, nil
}
func (s *store) ListConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) ([]*cloudintegrationtypes.StorableCloudIntegration, error) {
var accounts []*cloudintegrationtypes.StorableCloudIntegration
err := s.store.BunDBCtx(ctx).NewSelect().Model(&accounts).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Where("removed_at IS NULL").
Where("account_id IS NOT NULL").
Where("last_agent_report IS NOT NULL").
Order("created_at ASC").
Scan(ctx)
if err != nil {
return nil, err
}
return accounts, nil
}
func (s *store) CreateAccount(ctx context.Context, account *cloudintegrationtypes.StorableCloudIntegration) (*cloudintegrationtypes.StorableCloudIntegration, error) {
_, err := s.store.BunDBCtx(ctx).NewInsert().Model(account).Exec(ctx)
if err != nil {
return nil, s.store.WrapAlreadyExistsErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationAlreadyExists, "cloud integration account with id %s already exists", account.ID)
}
return account, nil
}
func (s *store) UpdateAccount(ctx context.Context, account *cloudintegrationtypes.StorableCloudIntegration) error {
_, err := s.store.BunDBCtx(ctx).
NewUpdate().
Model(account).
WherePK().
Where("org_id = ?", account.OrgID).
Where("provider = ?", account.Provider).
Exec(ctx)
return err
}
func (s *store) RemoveAccount(ctx context.Context, orgID, id valuer.UUID, provider cloudintegrationtypes.CloudProviderType) error {
_, err := s.store.BunDBCtx(ctx).NewUpdate().Model(new(cloudintegrationtypes.StorableCloudIntegration)).
Set("removed_at = ?", time.Now()).
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Exec(ctx)
return err
}
func (s *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
account := new(cloudintegrationtypes.StorableCloudIntegration)
err := s.store.BunDBCtx(ctx).NewSelect().Model(account).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Where("account_id = ?", providerAccountID).
Where("last_agent_report IS NOT NULL").
Where("removed_at IS NULL").
Scan(ctx)
if err != nil {
return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
}
return account, nil
}
func (s *store) GetServiceByServiceID(ctx context.Context, cloudIntegrationID valuer.UUID, serviceID cloudintegrationtypes.ServiceID) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
service := new(cloudintegrationtypes.StorableCloudIntegrationService)
err := s.store.BunDBCtx(ctx).NewSelect().Model(service).
Where("cloud_integration_id = ?", cloudIntegrationID).
Where("type = ?", serviceID).
Scan(ctx)
if err != nil {
return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "cloud integration service with id %s not found", serviceID)
}
return service, nil
}
func (s *store) ListServices(ctx context.Context, cloudIntegrationID valuer.UUID) ([]*cloudintegrationtypes.StorableCloudIntegrationService, error) {
var services []*cloudintegrationtypes.StorableCloudIntegrationService
err := s.store.BunDBCtx(ctx).NewSelect().Model(&services).
Where("cloud_integration_id = ?", cloudIntegrationID).
Scan(ctx)
if err != nil {
return nil, err
}
return services, nil
}
func (s *store) CreateService(ctx context.Context, service *cloudintegrationtypes.StorableCloudIntegrationService) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
_, err := s.store.BunDBCtx(ctx).NewInsert().Model(service).Exec(ctx)
if err != nil {
return nil, s.store.WrapAlreadyExistsErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationServiceAlreadyExists, "cloud integration service with id %s already exists for integration account", service.Type)
}
return service, nil
}
func (s *store) UpdateService(ctx context.Context, service *cloudintegrationtypes.StorableCloudIntegrationService) error {
_, err := s.store.BunDBCtx(ctx).NewUpdate().Model(service).
WherePK().
Where("cloud_integration_id = ?", service.CloudIntegrationID).
Where("type = ?", service.Type).
Exec(ctx)
return err
}

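Note: the deleted store methods all follow the same bun pattern — chain Where clauses on a model, then wrap scan failures into typed, code-carrying errors via WrapNotFoundErrf. A sketch of just the error-wrapping idea, with stand-in types rather than the real sqlstore API:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

// notFoundError mirrors the idea behind store.WrapNotFoundErrf: translate the
// driver-level "no rows" into a typed domain error that keeps its cause.
type notFoundError struct {
	code string
	msg  string
	err  error
}

func (e *notFoundError) Error() string { return e.code + ": " + e.msg }
func (e *notFoundError) Unwrap() error { return e.err }

func wrapNotFoundErrf(err error, code, format string, args ...any) error {
	return &notFoundError{code: code, msg: fmt.Sprintf(format, args...), err: err}
}

func main() {
	err := wrapNotFoundErrf(sql.ErrNoRows, "cloud_integration_not_found",
		"cloud integration account with id %s not found", "abc")
	fmt.Println(err)
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true: callers can still match the cause
}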
View File

@@ -27,12 +27,7 @@ type OrgConfig struct {
}
type PasswordConfig struct {
Invite InviteConfig `mapstructure:"invite"`
Reset ResetConfig `mapstructure:"reset"`
}
type InviteConfig struct {
MaxTokenLifetime time.Duration `mapstructure:"max_token_lifetime"`
Reset ResetConfig `mapstructure:"reset"`
}
type ResetConfig struct {
@@ -51,9 +46,6 @@ func newConfig() factory.Config {
AllowSelf: false,
MaxTokenLifetime: 6 * time.Hour,
},
Invite: InviteConfig{
MaxTokenLifetime: 48 * time.Hour,
},
},
Root: RootConfig{
Enabled: false,
@@ -69,10 +61,6 @@ func (c Config) Validate() error {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::reset::max_token_lifetime must be positive")
}
if c.Password.Invite.MaxTokenLifetime <= 0 {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::password::invite::max_token_lifetime must be positive")
}
if c.Root.Enabled {
if c.Root.Email.IsZero() {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "user::root::email is required when root user is enabled")

View File

@@ -19,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/emailtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/dustin/go-humanize"
)
@@ -203,7 +204,7 @@ func (m *Module) CreateBulkInvite(ctx context.Context, orgID valuer.UUID, userID
resetLink := userWithToken.ResetPasswordToken.FactorPasswordResetLink(frontendBaseUrl)
tokenLifetime := m.config.Password.Invite.MaxTokenLifetime
tokenLifetime := m.config.Password.Reset.MaxTokenLifetime
humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))
if err := m.emailing.SendHTML(ctx, userWithToken.User.Email.String(), "You're Invited to Join SigNoz", emailtypes.TemplateNameInvitationEmail, map[string]any{
@@ -262,7 +263,7 @@ func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ..
createUserOpts := root.NewCreateUserOptions(opts...)
// since assign is idempotent, multiple calls to assign won't cause issues in case of retries.
err := module.authz.Grant(ctx, input.OrgID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(input.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
err := module.authz.Grant(ctx, input.OrgID, []string{roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
if err != nil {
return err
}
@@ -332,8 +333,8 @@ func (m *Module) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, u
if user.Role != "" && user.Role != existingUser.Role {
err = m.authz.ModifyGrant(ctx,
orgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role)},
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(existingUser.Role)},
[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil),
)
if err != nil {
@@ -394,7 +395,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
}
// since revoke is idempotent, multiple calls to revoke won't cause issues in case of retries
err = module.authz.Revoke(ctx, orgID, []string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
err = module.authz.Revoke(ctx, orgID, []string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)}, authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
if err != nil {
return err
}
@@ -460,11 +461,7 @@ func (module *Module) GetOrCreateResetPasswordToken(ctx context.Context, userID
}
// create a new token
tokenLifetime := module.config.Password.Reset.MaxTokenLifetime
if user.Status == types.UserStatusPendingInvite {
tokenLifetime = module.config.Password.Invite.MaxTokenLifetime
}
resetPasswordToken, err := types.NewResetPasswordToken(password.ID, time.Now().Add(tokenLifetime))
resetPasswordToken, err := types.NewResetPasswordToken(password.ID, time.Now().Add(module.config.Password.Reset.MaxTokenLifetime))
if err != nil {
return nil, err
}
@@ -504,9 +501,6 @@ func (module *Module) ForgotPassword(ctx context.Context, orgID valuer.UUID, ema
resetLink := token.FactorPasswordResetLink(frontendBaseURL)
tokenLifetime := module.config.Password.Reset.MaxTokenLifetime
if user.Status == types.UserStatusPendingInvite {
tokenLifetime = module.config.Password.Invite.MaxTokenLifetime
}
humanizedTokenLifetime := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))
if err := module.emailing.SendHTML(
@@ -564,7 +558,7 @@ func (module *Module) UpdatePasswordByResetPasswordToken(ctx context.Context, to
if err = module.authz.Grant(
ctx,
user.OrgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
); err != nil {
return err
@@ -698,7 +692,7 @@ func (module *Module) CreateFirstUser(ctx context.Context, organization *types.O
return nil, err
}
managedRoles := authtypes.NewManagedRoles(organization.ID)
managedRoles := roletypes.NewManagedRoles(organization.ID)
err = module.authz.CreateManagedUserRoleTransactions(ctx, organization.ID, user.ID)
if err != nil {
return nil, err
@@ -799,7 +793,7 @@ func (module *Module) activatePendingUser(ctx context.Context, user *types.User)
err := module.authz.Grant(
ctx,
user.OrgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role)},
authtypes.MustNewSubject(authtypes.TypeableUser, user.ID.StringValue(), user.OrgID, nil),
)
if err != nil {

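Note: with the invite-specific lifetime removed, both the invite and reset emails derive their human-readable expiry from the single Password.Reset.MaxTokenLifetime. The humanize call used above behaves like this (6 hours is the default reset lifetime from the config hunk earlier):

package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/dustin/go-humanize"
)

func main() {
	tokenLifetime := 6 * time.Hour // default Password.Reset.MaxTokenLifetime
	// RelTime renders the distance between two instants; empty labels plus
	// TrimSpace leave just the duration phrase for the email template.
	humanized := strings.TrimSpace(humanize.RelTime(time.Now(), time.Now().Add(tokenLifetime), "", ""))
	fmt.Println(humanized) // "6 hours"
}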
View File

@@ -11,6 +11,7 @@ import (
"github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -158,8 +159,8 @@ func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID
if oldRole != types.RoleAdmin {
if err := s.authz.ModifyGrant(ctx,
orgID,
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(oldRole)},
[]string{authtypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin)},
[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(oldRole)},
[]string{roletypes.MustGetSigNozManagedRoleFromExistingRole(types.RoleAdmin)},
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), orgID, nil),
); err != nil {
return err

View File

@@ -20,7 +20,6 @@ import (
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/dustin/go-humanize"
"golang.org/x/exp/maps"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
@@ -159,8 +158,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
metricNames := make([]string, 0)
for idx, query := range req.CompositeQuery.Queries {
event.QueryType = query.Type.StringValue()
switch query.Type {
case qbtypes.QueryTypeBuilder:
if query.Type == qbtypes.QueryTypeBuilder {
if spec, ok := query.Spec.(qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]); ok {
for _, agg := range spec.Aggregations {
if agg.MetricName != "" {
@@ -238,7 +236,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
}
req.CompositeQuery.Queries[idx].Spec = spec
}
case qbtypes.QueryTypePromQL:
} else if query.Type == qbtypes.QueryTypePromQL {
event.MetricsUsed = true
switch spec := query.Spec.(type) {
case qbtypes.PromQuery:
@@ -249,7 +247,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
}
req.CompositeQuery.Queries[idx].Spec = spec
}
case qbtypes.QueryTypeClickHouseSQL:
} else if query.Type == qbtypes.QueryTypeClickHouseSQL {
switch spec := query.Spec.(type) {
case qbtypes.ClickHouseQuery:
if strings.TrimSpace(spec.Query) != "" {
@@ -258,7 +256,7 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
event.TracesUsed = strings.Contains(spec.Query, "signoz_traces")
}
}
case qbtypes.QueryTypeTraceOperator:
} else if query.Type == qbtypes.QueryTypeTraceOperator {
if spec, ok := query.Spec.(qbtypes.QueryBuilderTraceOperator); ok {
if spec.StepInterval.Seconds() == 0 {
spec.StepInterval = qbtypes.Step{
@@ -278,9 +276,23 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
}
}
// Fetch temporality for all metrics at once
var metricTemporality map[string]metrictypes.Temporality
var metricTypes map[string]metrictypes.Type
if len(metricNames) > 0 {
var err error
metricTemporality, metricTypes, err = q.metadataStore.FetchTemporalityAndTypeMulti(ctx, req.Start, req.End, metricNames...)
if err != nil {
q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
// Continue without temporality - statement builder will handle unspecified
metricTemporality = make(map[string]metrictypes.Temporality)
metricTypes = make(map[string]metrictypes.Type)
}
q.logger.DebugContext(ctx, "fetched metric temporalities and types", "metric_temporality", metricTemporality, "metric_types", metricTypes)
}
queries := make(map[string]qbtypes.Query)
steps := make(map[string]qbtypes.Step)
missingMetrics := []string{}
for _, query := range req.CompositeQuery.Queries {
var queryName string
@@ -362,26 +374,15 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
queries[spec.Name] = bq
steps[spec.Name] = spec.StepInterval
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
var metricTemporality map[string]metrictypes.Temporality
var metricTypes map[string]metrictypes.Type
if len(metricNames) > 0 {
var err error
metricTemporality, metricTypes, err = q.metadataStore.FetchTemporalityAndTypeMulti(ctx, req.Start, req.End, metricNames...)
if err != nil {
q.logger.WarnContext(ctx, "failed to fetch metric temporality", "error", err, "metrics", metricNames)
return nil, errors.NewInternalf(errors.CodeInternal, "failed to fetch metrics temporality")
}
q.logger.DebugContext(ctx, "fetched metric temporalities and types", "metric_temporality", metricTemporality, "metric_types", metricTypes)
}
for i := range spec.Aggregations {
if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Temporality == metrictypes.Unknown {
if temp, ok := metricTemporality[spec.Aggregations[i].MetricName]; ok && temp != metrictypes.Unknown {
spec.Aggregations[i].Temporality = temp
}
}
// TODO(srikanthccv): warn when the metric is missing
if spec.Aggregations[i].Temporality == metrictypes.Unknown {
missingMetrics = append(missingMetrics, spec.Aggregations[i].MetricName)
continue
spec.Aggregations[i].Temporality = metrictypes.Unspecified
}
if spec.Aggregations[i].MetricName != "" && spec.Aggregations[i].Type == metrictypes.UnspecifiedType {
@@ -408,24 +409,6 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
}
}
}
if len(missingMetrics) > 0 {
lastSeenInfo, _ := q.metadataStore.FetchLastSeenInfoMulti(ctx, missingMetrics...)
lastSeenStr := func(name string) string {
if ts, ok := lastSeenInfo[name]; ok && ts > 0 {
ago := humanize.RelTime(time.UnixMilli(ts), time.Now(), "ago", "from now")
return fmt.Sprintf("%s (last seen %s)", name, ago)
}
return name
}
if len(missingMetrics) == 1 {
return nil, errors.NewNotFoundf(errors.CodeNotFound, "no data found for the metric %s in the query time range", lastSeenStr(missingMetrics[0]))
}
parts := make([]string, len(missingMetrics))
for i, m := range missingMetrics {
parts[i] = lastSeenStr(m)
}
return nil, errors.NewNotFoundf(errors.CodeNotFound, "no data found for the following metrics in the query time range: %s", strings.Join(parts, ", "))
}
qbResp, qbErr := q.run(ctx, orgID, queries, req, steps, event)
if qbResp != nil {
qbResp.QBEvent = event
@@ -680,7 +663,7 @@ func (q *querier) run(
}
// executeWithCache executes a query using the bucket cache
func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query qbtypes.Query, step qbtypes.Step, _ bool) (*qbtypes.Result, error) {
func (q *querier) executeWithCache(ctx context.Context, orgID valuer.UUID, query qbtypes.Query, step qbtypes.Step, noCache bool) (*qbtypes.Result, error) {
// Get cached data and missing ranges
cachedResult, missingRanges := q.bucketCache.GetMissRanges(ctx, orgID, query, step)

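Note: the hoisted temporality block trades the old hard failure for a soft one — if the lookup fails, the maps are left empty and unknown temporalities default to Unspecified instead of aborting the request. A compact sketch of that fallback policy, with the metadata-store call faked:

package main

import (
	"errors"
	"fmt"
)

type temporality string

const (
	unknown     temporality = "unknown"
	unspecified temporality = "unspecified"
)

// fetchTemporality stands in for metadataStore.FetchTemporalityAndTypeMulti;
// it fails here to exercise the fallback path.
func fetchTemporality(names ...string) (map[string]temporality, error) {
	return nil, errors.New("metadata store unavailable")
}

func main() {
	metrics := []string{"http_requests_total"}
	temporalities, err := fetchTemporality(metrics...)
	if err != nil {
		// Mirrors the new behavior: log-and-continue with an empty map
		// instead of failing the whole query range request.
		temporalities = make(map[string]temporality)
	}
	for _, m := range metrics {
		t, ok := temporalities[m]
		if !ok || t == unknown {
			t = unspecified // the statement builder handles unspecified
		}
		fmt.Println(m, "->", t)
	}
}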
View File

@@ -76,21 +76,6 @@ func TestManager_TestNotification_SendUnmatched_ThresholdRule(t *testing.T) {
alertDataRows := cmock.NewRows(cols, tc.Values)
mock := mockStore.Mock()
// Mock metadata queries for FetchTemporalityAndTypeMulti
// First query: fetchMetricsTemporalityAndType (from signoz_metrics time series table)
metadataCols := []cmock.ColumnType{
{Name: "metric_name", Type: "String"},
{Name: "temporality", Type: "String"},
{Name: "type", Type: "String"},
{Name: "is_monotonic", Type: "Bool"},
}
metadataRows := cmock.NewRows(metadataCols, [][]any{
{"probe_success", metrictypes.Unspecified, metrictypes.GaugeType, false},
})
mock.ExpectQuery("*distributed_time_series_v4*").WithArgs(nil, nil, nil).WillReturnRows(metadataRows)
// Second query: fetchMeterSourceMetricsTemporalityAndType (from signoz_meter table)
emptyMetadataRows := cmock.NewRows(metadataCols, [][]any{})
mock.ExpectQuery("*meter*").WithArgs(nil).WillReturnRows(emptyMetadataRows)
// Generate query arguments for the metric query
evalTime := time.Now().UTC()

View File

@@ -20,7 +20,6 @@ import (
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/user"
@@ -114,9 +113,6 @@ type Config struct {
// User config
User user.Config `mapstructure:"user"`
// IdentN config
IdentN identn.Config `mapstructure:"identn"`
}
// DeprecatedFlags are the flags that are deprecated and scheduled for removal.
@@ -180,7 +176,6 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
metricsexplorer.NewConfigFactory(),
flagger.NewConfigFactory(),
user.NewConfigFactory(),
identn.NewConfigFactory(),
}
conf, err := config.New(ctx, resolverConfig, configFactories)

View File

@@ -22,9 +22,6 @@ import (
"github.com/SigNoz/signoz/pkg/flagger/configflagger"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/global/signozglobal"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/identn/apikeyidentn"
"github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
@@ -274,13 +271,6 @@ func NewTokenizerProviderFactories(cache cache.Cache, sqlstore sqlstore.SQLStore
)
}
func NewIdentNProviderFactories(sqlstore sqlstore.SQLStore, tokenizer tokenizer.Tokenizer) factory.NamedMap[factory.ProviderFactory[identn.IdentN, identn.Config]] {
return factory.MustNewNamedMap(
tokenizeridentn.NewFactory(tokenizer),
apikeyidentn.NewFactory(sqlstore),
)
}
func NewGlobalProviderFactories() factory.NamedMap[factory.ProviderFactory[global.Global, global.Config]] {
return factory.MustNewNamedMap(
signozglobal.NewFactory(),

View File

@@ -17,6 +17,8 @@ import (
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/identn/apikeyidentn"
"github.com/SigNoz/signoz/pkg/identn/tokenizeridentn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
@@ -393,16 +395,9 @@ func New(
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter)
// Initialize identN resolver
identNFactories := NewIdentNProviderFactories(sqlstore, tokenizer)
identNs := []identn.IdentN{}
for _, identNFactory := range identNFactories.GetInOrder() {
identN, err := identNFactory.New(ctx, providerSettings, config.IdentN)
if err != nil {
return nil, err
}
identNs = append(identNs, identN)
}
identNResolver := identn.NewIdentNResolver(providerSettings, identNs...)
tokenizeridentN := tokenizeridentn.New(providerSettings, tokenizer, []string{"Authorization", "Sec-WebSocket-Protocol"})
apikeyIdentN := apikeyidentn.New(providerSettings, sqlstore, []string{"SIGNOZ-API-KEY"})
identNResolver := identn.NewIdentNResolver(providerSettings, tokenizeridentN, apikeyIdentN)
userService := impluser.NewService(providerSettings, impluser.NewStore(sqlstore, providerSettings), modules.User, orgGetter, authz, config.User.Root)

View File

@@ -7,7 +7,7 @@ import (
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
@@ -54,7 +54,7 @@ func (migration *addManagedRoles) Up(ctx context.Context, db *bun.DB) error {
return err
}
managedRoles := []*authtypes.StorableRole{}
managedRoles := []*roletypes.StorableRole{}
for _, orgIDStr := range orgIDs {
orgID, err := valuer.NewUUID(orgIDStr)
if err != nil {
@@ -62,20 +62,20 @@ func (migration *addManagedRoles) Up(ctx context.Context, db *bun.DB) error {
}
// signoz admin
signozAdminRole := authtypes.NewRole(authtypes.SigNozAdminRoleName, authtypes.SigNozAdminRoleDescription, authtypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozAdminRole))
signozAdminRole := roletypes.NewRole(roletypes.SigNozAdminRoleName, roletypes.SigNozAdminRoleDescription, roletypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozAdminRole))
// signoz editor
signozEditorRole := authtypes.NewRole(authtypes.SigNozEditorRoleName, authtypes.SigNozEditorRoleDescription, authtypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozEditorRole))
signozEditorRole := roletypes.NewRole(roletypes.SigNozEditorRoleName, roletypes.SigNozEditorRoleDescription, roletypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozEditorRole))
// signoz viewer
signozViewerRole := authtypes.NewRole(authtypes.SigNozViewerRoleName, authtypes.SigNozViewerRoleDescription, authtypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozViewerRole))
signozViewerRole := roletypes.NewRole(roletypes.SigNozViewerRoleName, roletypes.SigNozViewerRoleDescription, roletypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozViewerRole))
// signoz anonymous
signozAnonymousRole := authtypes.NewRole(authtypes.SigNozAnonymousRoleName, authtypes.SigNozAnonymousRoleDescription, authtypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, authtypes.NewStorableRoleFromRole(signozAnonymousRole))
signozAnonymousRole := roletypes.NewRole(roletypes.SigNozAnonymousRoleName, roletypes.SigNozAnonymousRoleDescription, roletypes.RoleTypeManaged, orgID)
managedRoles = append(managedRoles, roletypes.NewStorableRoleFromRole(signozAnonymousRole))
}
if len(managedRoles) > 0 {

View File

@@ -6,7 +6,7 @@ import (
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/oklog/ulid/v2"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect"
@@ -83,7 +83,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
INSERT INTO tuple (store, object_type, object_id, relation, _user, user_type, ulid, inserted_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (store, object_type, object_id, relation, _user) DO NOTHING`,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName+"#assignee", "userset", tupleID, now,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName+"#assignee", "userset", tupleID, now,
)
if err != nil {
return err
@@ -102,7 +102,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
INSERT INTO changelog (store, object_type, object_id, relation, _user, operation, ulid, inserted_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (store, ulid, object_type) DO NOTHING`,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName+"#assignee", "TUPLE_OPERATION_WRITE", tupleID, now,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role:organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName+"#assignee", "TUPLE_OPERATION_WRITE", tupleID, now,
)
if err != nil {
return err
@@ -113,7 +113,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
INSERT INTO tuple (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation, user_type, ulid, inserted_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation) DO NOTHING`,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName, "assignee", "userset", tupleID, now,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName, "assignee", "userset", tupleID, now,
)
if err != nil {
return err
@@ -132,7 +132,7 @@ func (migration *addAnonymousPublicDashboardTransaction) Up(ctx context.Context,
INSERT INTO changelog (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation, operation, ulid, inserted_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (store, ulid, object_type) DO NOTHING`,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+authtypes.SigNozAnonymousRoleName, "assignee", 0, tupleID, now,
storeID, "metaresource", "organization/"+orgID+"/public-dashboard/*", "read", "role", "organization/"+orgID+"/role/"+roletypes.SigNozAnonymousRoleName, "assignee", 0, tupleID, now,
)
if err != nil {
return err

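Note: every statement in this migration uses ON CONFLICT ... DO NOTHING, which is what makes re-running it safe across retries. A self-contained demonstration of that idempotency against an in-memory SQLite database (driver choice is an assumption for the demo, not what the repo uses):

package main

import (
	"database/sql"
	"fmt"

	_ "modernc.org/sqlite" // pure-Go SQLite driver, chosen for a runnable demo
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE tuple (object_id TEXT, relation TEXT, PRIMARY KEY (object_id, relation))`); err != nil {
		panic(err)
	}

	// Running the same insert twice is a no-op the second time, so the
	// migration stays idempotent.
	stmt := `INSERT INTO tuple (object_id, relation) VALUES (?, ?) ON CONFLICT (object_id, relation) DO NOTHING`
	for i := 0; i < 2; i++ {
		if _, err := db.Exec(stmt, "public-dashboard/*", "read"); err != nil {
			panic(err)
		}
	}

	var count int
	_ = db.QueryRow(`SELECT COUNT(*) FROM tuple`).Scan(&count)
	fmt.Println("rows:", count) // 1
}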
View File

@@ -8,9 +8,8 @@ import (
)
var (
IndexTypeUnique = IndexType{s: valuer.NewString("uq")}
IndexTypeIndex = IndexType{s: valuer.NewString("ix")}
IndexTypePartialUnique = IndexType{s: valuer.NewString("puq")}
IndexTypeUnique = IndexType{s: valuer.NewString("uq")}
IndexTypeIndex = IndexType{s: valuer.NewString("ix")}
)
type IndexType struct{ s valuer.String }
@@ -23,7 +22,6 @@ type Index interface {
// The name of the index.
// - Indexes are named as `ix_<table_name>_<column_names>`. The column names are separated by underscores.
// - Unique constraints are named as `uq_<table_name>_<column_names>`. The column names are separated by underscores.
// - Partial unique indexes are named as `puq_<table_name>_<column_names>_<predicate_hash>`.
// The name is autogenerated and should not be set by the user.
Name() string
@@ -135,101 +133,3 @@ func (index *UniqueIndex) ToDropSQL(fmter SQLFormatter) []byte {
return sql
}
type PartialUniqueIndex struct {
TableName TableName
ColumnNames []ColumnName
Where string
name string
}
func (index *PartialUniqueIndex) Name() string {
if index.name != "" {
return index.name
}
var b strings.Builder
b.WriteString(IndexTypePartialUnique.String())
b.WriteString("_")
b.WriteString(string(index.TableName))
b.WriteString("_")
for i, column := range index.ColumnNames {
if i > 0 {
b.WriteString("_")
}
b.WriteString(string(column))
}
b.WriteString("_")
b.WriteString((&whereNormalizer{input: index.Where}).hash())
return b.String()
}
func (index *PartialUniqueIndex) Named(name string) Index {
copyOfColumnNames := make([]ColumnName, len(index.ColumnNames))
copy(copyOfColumnNames, index.ColumnNames)
return &PartialUniqueIndex{
TableName: index.TableName,
ColumnNames: copyOfColumnNames,
Where: index.Where,
name: name,
}
}
func (index *PartialUniqueIndex) IsNamed() bool {
return index.name != ""
}
func (*PartialUniqueIndex) Type() IndexType {
return IndexTypePartialUnique
}
func (index *PartialUniqueIndex) Columns() []ColumnName {
return index.ColumnNames
}
func (index *PartialUniqueIndex) Equals(other Index) bool {
if other.Type() != IndexTypePartialUnique {
return false
}
otherPartial, ok := other.(*PartialUniqueIndex)
if !ok {
return false
}
return index.Name() == other.Name() && slices.Equal(index.Columns(), other.Columns()) && (&whereNormalizer{input: index.Where}).normalize() == (&whereNormalizer{input: otherPartial.Where}).normalize()
}
func (index *PartialUniqueIndex) ToCreateSQL(fmter SQLFormatter) []byte {
sql := []byte{}
sql = append(sql, "CREATE UNIQUE INDEX IF NOT EXISTS "...)
sql = fmter.AppendIdent(sql, index.Name())
sql = append(sql, " ON "...)
sql = fmter.AppendIdent(sql, string(index.TableName))
sql = append(sql, " ("...)
for i, column := range index.ColumnNames {
if i > 0 {
sql = append(sql, ", "...)
}
sql = fmter.AppendIdent(sql, string(column))
}
sql = append(sql, ") WHERE "...)
sql = append(sql, index.Where...)
return sql
}
func (index *PartialUniqueIndex) ToDropSQL(fmter SQLFormatter) []byte {
sql := []byte{}
sql = append(sql, "DROP INDEX IF EXISTS "...)
sql = fmter.AppendIdent(sql, index.Name())
return sql
}

View File

@@ -38,110 +38,6 @@ func TestIndexToCreateSQL(t *testing.T) {
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "my_index" ON "users" ("id", "name", "email")`,
},
{
name: "PartialUnique_1Column",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_94610c77" ON "users" ("email") WHERE "deleted_at" IS NULL`,
},
{
name: "PartialUnique_2Columns",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"org_id", "email"},
Where: `"deleted_at" IS NULL`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_org_id_email_94610c77" ON "users" ("org_id", "email") WHERE "deleted_at" IS NULL`,
},
{
name: "PartialUnique_Named",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
name: "my_partial_index",
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "my_partial_index" ON "users" ("email") WHERE "deleted_at" IS NULL`,
},
{
name: "PartialUnique_WhereWithParentheses",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `("deleted_at" IS NULL)`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_94610c77" ON "users" ("email") WHERE ("deleted_at" IS NULL)`,
},
{
name: "PartialUnique_WhereWithQuotedIdentifier",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"order" IS NULL`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_14c5f5f2" ON "users" ("email") WHERE "order" IS NULL`,
},
{
name: "PartialUnique_WhereWithQuotedLiteral",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `status = 'somewhere'`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_9817c709" ON "users" ("email") WHERE status = 'somewhere'`,
},
{
name: "PartialUnique_WhereWith2Columns",
index: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email", "status"},
Where: `email = 'test@example.com' AND status = 'active'`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_status_e70e78c3" ON "users" ("email", "status") WHERE email = 'test@example.com' AND status = 'active'`,
},
// postgres docs example
{
name: "PartialUnique_WhereWithPostgresDocsExample_1",
index: &PartialUniqueIndex{
TableName: "access_log",
ColumnNames: []ColumnName{"client_ip"},
Where: `NOT (client_ip > inet '192.168.100.0' AND client_ip < inet '192.168.100.255')`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_access_log_client_ip_5a596410" ON "access_log" ("client_ip") WHERE NOT (client_ip > inet '192.168.100.0' AND client_ip < inet '192.168.100.255')`,
},
// postgres docs example
{
name: "PartialUnique_WhereWithPostgresDocsExample_2",
index: &PartialUniqueIndex{
TableName: "orders",
ColumnNames: []ColumnName{"order_nr"},
Where: `billed is not true`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_orders_order_nr_6d31bb0e" ON "orders" ("order_nr") WHERE billed is not true`,
},
// sqlite docs example
{
name: "PartialUnique_WhereWithSqliteDocsExample_1",
index: &PartialUniqueIndex{
TableName: "person",
ColumnNames: []ColumnName{"team_id"},
Where: `is_team_leader`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_person_team_id_c8604a29" ON "person" ("team_id") WHERE is_team_leader`,
},
// sqlite docs example
{
name: "PartialUnique_WhereWithSqliteDocsExample_2",
index: &PartialUniqueIndex{
TableName: "purchaseorder",
ColumnNames: []ColumnName{"parent_po"},
Where: `parent_po IS NOT NULL`,
},
sql: `CREATE UNIQUE INDEX IF NOT EXISTS "puq_purchaseorder_parent_po_dbe2929d" ON "purchaseorder" ("parent_po") WHERE parent_po IS NOT NULL`,
},
}
for _, testCase := range testCases {
@@ -153,109 +49,3 @@ func TestIndexToCreateSQL(t *testing.T) {
})
}
}
func TestIndexEquals(t *testing.T) {
testCases := []struct {
name string
a Index
b Index
equals bool
}{
{
name: "PartialUnique_Same",
a: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
b: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
equals: true,
},
{
name: "PartialUnique_NormalizedPostgresWhere",
a: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
b: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `(deleted_at IS NULL)`,
},
equals: true,
},
{
name: "PartialUnique_DifferentWhere",
a: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
b: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"active" = true`,
},
equals: false,
},
{
name: "PartialUnique_NotEqual_Unique",
a: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
b: &UniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
},
equals: false,
},
{
name: "Unique_NotEqual_PartialUnique",
a: &UniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
},
b: &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
equals: false,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
assert.Equal(t, testCase.equals, testCase.a.Equals(testCase.b))
})
}
}
func TestPartialUniqueIndexName(t *testing.T) {
a := &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
}
b := &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `(deleted_at IS NULL)`,
}
c := &PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"active" = true`,
}
assert.Equal(t, "puq_users_email_94610c77", a.Name())
assert.Equal(t, a.Name(), b.Name())
assert.NotEqual(t, a.Name(), c.Name())
}

View File

@@ -1,162 +0,0 @@
package sqlschema
import (
"fmt"
"hash/fnv"
"strings"
)
type whereNormalizer struct {
input string
}
func (n *whereNormalizer) hash() string {
hasher := fnv.New32a()
_, _ = hasher.Write([]byte(n.normalize()))
return fmt.Sprintf("%08x", hasher.Sum32())
}
func (n *whereNormalizer) normalize() string {
where := strings.TrimSpace(n.input)
where = n.stripOuterParentheses(where)
var output strings.Builder
output.Grow(len(where))
for i := 0; i < len(where); i++ {
switch where[i] {
case ' ', '\t', '\n', '\r':
if output.Len() > 0 {
last := output.String()[output.Len()-1]
if last != ' ' {
output.WriteByte(' ')
}
}
case '\'':
end := n.consumeSingleQuotedLiteral(where, i, &output)
i = end
case '"':
token, end := n.consumeDoubleQuotedToken(where, i)
output.WriteString(token)
i = end
default:
output.WriteByte(where[i])
}
}
return strings.TrimSpace(output.String())
}
func (n *whereNormalizer) stripOuterParentheses(s string) string {
for {
s = strings.TrimSpace(s)
if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' || !n.hasWrappingParentheses(s) {
return s
}
s = s[1 : len(s)-1]
}
}
func (n *whereNormalizer) hasWrappingParentheses(s string) bool {
depth := 0
inSingleQuotedLiteral := false
inDoubleQuotedToken := false
for i := 0; i < len(s); i++ {
switch s[i] {
case '\'':
if inDoubleQuotedToken {
continue
}
if inSingleQuotedLiteral && i+1 < len(s) && s[i+1] == '\'' {
i++
continue
}
inSingleQuotedLiteral = !inSingleQuotedLiteral
case '"':
if inSingleQuotedLiteral {
continue
}
if inDoubleQuotedToken && i+1 < len(s) && s[i+1] == '"' {
i++
continue
}
inDoubleQuotedToken = !inDoubleQuotedToken
case '(':
if inSingleQuotedLiteral || inDoubleQuotedToken {
continue
}
depth++
case ')':
if inSingleQuotedLiteral || inDoubleQuotedToken {
continue
}
depth--
if depth == 0 && i != len(s)-1 {
return false
}
}
}
return depth == 0
}
func (n *whereNormalizer) consumeSingleQuotedLiteral(s string, start int, output *strings.Builder) int {
output.WriteByte(s[start])
for i := start + 1; i < len(s); i++ {
output.WriteByte(s[i])
if s[i] == '\'' {
if i+1 < len(s) && s[i+1] == '\'' {
i++
output.WriteByte(s[i])
continue
}
return i
}
}
return len(s) - 1
}
func (n *whereNormalizer) consumeDoubleQuotedToken(s string, start int) (string, int) {
var ident strings.Builder
for i := start + 1; i < len(s); i++ {
if s[i] == '"' {
if i+1 < len(s) && s[i+1] == '"' {
ident.WriteByte('"')
i++
continue
}
if n.isSimpleUnquotedIdentifier(ident.String()) {
return ident.String(), i
}
return s[start : i+1], i
}
ident.WriteByte(s[i])
}
return s[start:], len(s) - 1
}
func (n *whereNormalizer) isSimpleUnquotedIdentifier(s string) bool {
if s == "" || strings.ToLower(s) != s {
return false
}
for i := 0; i < len(s); i++ {
ch := s[i]
if (ch >= 'a' && ch <= 'z') || ch == '_' {
continue
}
if i > 0 && ch >= '0' && ch <= '9' {
continue
}
return false
}
return true
}
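A minimal in-package sketch (assuming fmt is imported) of why the normalizer exists: two syntactically different but equivalent WHERE clauses normalize to the same string, so they hash to the same suffix, which is how PartialUniqueIndex names stay stable across dialect-specific rewrites (see TestPartialUniqueIndexName above).
func exampleWhereNormalizer() {
	a := &whereNormalizer{input: `"deleted_at" IS NULL`}
	b := &whereNormalizer{input: `(deleted_at IS NULL)`}
	fmt.Println(a.normalize())        // deleted_at IS NULL
	fmt.Println(a.hash() == b.hash()) // true: same suffix, same index name
}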

View File

@@ -1,57 +0,0 @@
package sqlschema
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestWhereNormalizerNormalize(t *testing.T) {
testCases := []struct {
name string
input string
output string
}{
{
name: "BooleanComparison",
input: `"active" = true`,
output: `active = true`,
},
{
name: "QuotedStringLiteralPreserved",
input: `status = 'somewhere'`,
output: `status = 'somewhere'`,
},
{
name: "EscapedStringLiteralPreserved",
input: `status = 'it''s active'`,
output: `status = 'it''s active'`,
},
{
name: "OuterParenthesesRemoved",
input: `(("deleted_at" IS NULL))`,
output: `deleted_at IS NULL`,
},
{
name: "InnerParenthesesPreserved",
input: `("deleted_at" IS NULL OR ("active" = true AND "status" = 'open'))`,
output: `deleted_at IS NULL OR (active = true AND status = 'open')`,
},
{
name: "MultipleClausesWhitespaceCollapsed",
input: " ( \"deleted_at\" IS NULL \n AND\t\"active\" = true AND status = 'open' ) ",
output: `deleted_at IS NULL AND active = true AND status = 'open'`,
},
{
name: "ComplexBooleanClauses",
input: `NOT ("deleted_at" IS NOT NULL AND ("active" = false OR "status" = 'archived'))`,
output: `NOT (deleted_at IS NOT NULL AND (active = false OR status = 'archived'))`,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
assert.Equal(t, testCase.output, (&whereNormalizer{input: testCase.input}).normalize())
})
}
}

View File

@@ -1146,100 +1146,3 @@ func TestOperatorAlterTable(t *testing.T) {
})
}
}
func TestOperatorDiffIndices(t *testing.T) {
testCases := []struct {
name string
oldIndices []Index
newIndices []Index
expectedSQLs [][]byte
}{
{
name: "UniqueToPartialUnique_DropAndCreate",
oldIndices: []Index{
&UniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
},
},
newIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
},
expectedSQLs: [][]byte{
[]byte(`DROP INDEX IF EXISTS "uq_users_email"`),
[]byte(`CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_94610c77" ON "users" ("email") WHERE "deleted_at" IS NULL`),
},
},
{
name: "PartialUnique_SameWhere_NoOp",
oldIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
},
newIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
},
expectedSQLs: [][]byte{},
},
{
name: "PartialUnique_NormalizedWhere_NoOp",
oldIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `(deleted_at IS NULL)`,
},
},
newIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
},
expectedSQLs: [][]byte{},
},
{
name: "PartialUnique_DifferentWhere_DropAndCreate",
oldIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"deleted_at" IS NULL`,
},
},
newIndices: []Index{
&PartialUniqueIndex{
TableName: "users",
ColumnNames: []ColumnName{"email"},
Where: `"active" = true`,
},
},
expectedSQLs: [][]byte{
[]byte(`DROP INDEX IF EXISTS "puq_users_email_94610c77"`),
[]byte(`CREATE UNIQUE INDEX IF NOT EXISTS "puq_users_email_202121f8" ON "users" ("email") WHERE "active" = true`),
},
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
fmter := NewFormatter(schema.NewNopFormatter().Dialect())
operator := NewOperator(fmter, OperatorSupport{})
actuals := operator.DiffIndices(testCase.oldIndices, testCase.newIndices)
assert.Equal(t, testCase.expectedSQLs, actuals)
})
}
}

View File

@@ -3,7 +3,6 @@ package sqlitesqlschema
import (
"context"
"strconv"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
@@ -115,29 +114,7 @@ func (provider *provider) GetIndices(ctx context.Context, tableName sqlschema.Ta
return nil, err
}
if unique && partial {
var indexSQL string
if err := provider.
sqlstore.
BunDB().
NewRaw("SELECT sql FROM sqlite_master WHERE type = 'index' AND name = ?", name).
Scan(ctx, &indexSQL); err != nil {
return nil, err
}
where := extractWhereClause(indexSQL)
index := &sqlschema.PartialUniqueIndex{
TableName: tableName,
ColumnNames: columns,
Where: where,
}
if index.Name() == name {
indices = append(indices, index)
} else {
indices = append(indices, index.Named(name))
}
} else if unique {
if unique {
index := &sqlschema.UniqueIndex{
TableName: tableName,
ColumnNames: columns,
@@ -171,73 +148,3 @@ func (provider *provider) ToggleFKEnforcement(ctx context.Context, db bun.IDB, o
return errors.NewInternalf(errors.CodeInternal, "foreign_keys(actual: %s, expected: %s), maybe a transaction is in progress?", strconv.FormatBool(val), strconv.FormatBool(on))
}
func extractWhereClause(sql string) string {
lastWhere := -1
inSingleQuotedLiteral := false
inDoubleQuotedIdentifier := false
inBacktickQuotedIdentifier := false
inBracketQuotedIdentifier := false
for i := 0; i < len(sql); i++ {
switch sql[i] {
case '\'':
if inDoubleQuotedIdentifier || inBacktickQuotedIdentifier || inBracketQuotedIdentifier {
continue
}
if inSingleQuotedLiteral && i+1 < len(sql) && sql[i+1] == '\'' {
i++
continue
}
inSingleQuotedLiteral = !inSingleQuotedLiteral
case '"':
if inSingleQuotedLiteral || inBacktickQuotedIdentifier || inBracketQuotedIdentifier {
continue
}
if inDoubleQuotedIdentifier && i+1 < len(sql) && sql[i+1] == '"' {
i++
continue
}
inDoubleQuotedIdentifier = !inDoubleQuotedIdentifier
case '`':
if inSingleQuotedLiteral || inDoubleQuotedIdentifier || inBracketQuotedIdentifier {
continue
}
inBacktickQuotedIdentifier = !inBacktickQuotedIdentifier
case '[':
if inSingleQuotedLiteral || inDoubleQuotedIdentifier || inBacktickQuotedIdentifier || inBracketQuotedIdentifier {
continue
}
inBracketQuotedIdentifier = true
case ']':
if inBracketQuotedIdentifier {
inBracketQuotedIdentifier = false
}
}
if inSingleQuotedLiteral || inDoubleQuotedIdentifier || inBacktickQuotedIdentifier || inBracketQuotedIdentifier {
continue
}
if strings.EqualFold(sql[i:min(i+5, len(sql))], "WHERE") &&
(i == 0 || !isSQLiteIdentifierChar(sql[i-1])) &&
(i+5 == len(sql) || !isSQLiteIdentifierChar(sql[i+5])) {
lastWhere = i
i += 4
}
}
if lastWhere == -1 {
return ""
}
return strings.TrimSpace(sql[lastWhere+len("WHERE"):])
}
func isSQLiteIdentifierChar(ch byte) bool {
return (ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '_'
}
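A short in-package sketch (the input is a hand-written illustration, assuming fmt is imported) of why the scanner tracks quoting state: a WHERE inside a quoted identifier or a string literal must not be mistaken for the keyword.
func exampleExtractWhere() {
	ddl := `CREATE UNIQUE INDEX "where idx" ON "t" ("c") WHERE "where" = 'WHERE x'`
	fmt.Println(extractWhereClause(ddl)) // "where" = 'WHERE x'
}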

View File

@@ -1,52 +0,0 @@
package sqlitesqlschema
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestExtractWhereClause(t *testing.T) {
testCases := []struct {
name string
sql string
where string
}{
{
name: "UppercaseWhere",
sql: `CREATE UNIQUE INDEX "idx" ON "users" ("email") WHERE "deleted_at" IS NULL`,
where: `"deleted_at" IS NULL`,
},
{
name: "LowercaseWhere",
sql: `CREATE UNIQUE INDEX "idx" ON "users" ("email") where "deleted_at" IS NULL`,
where: `"deleted_at" IS NULL`,
},
{
name: "NewlineBeforeWhere",
sql: "CREATE UNIQUE INDEX \"idx\" ON \"users\" (\"email\")\nWHERE \"deleted_at\" IS NULL",
where: `"deleted_at" IS NULL`,
},
{
name: "ExtraWhitespace",
sql: "CREATE UNIQUE INDEX \"idx\" ON \"users\" (\"email\") \n \t where \"deleted_at\" IS NULL ",
where: `"deleted_at" IS NULL`,
},
{
name: "WhereInStringLiteral",
sql: `CREATE UNIQUE INDEX "idx" ON "users" ("email") WHERE status = 'somewhere'`,
where: `status = 'somewhere'`,
},
{
name: "BooleanLiteral",
sql: `CREATE UNIQUE INDEX "idx" ON "users" ("email") WHERE active = true`,
where: `active = true`,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
assert.Equal(t, testCase.where, extractWhereClause(testCase.sql))
})
}
}

View File

@@ -2,17 +2,15 @@ package telemetrylogs
import (
"context"
"slices"
"strings"
"testing"
"time"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/querybuilder/resourcefilter"
"github.com/SigNoz/signoz/pkg/telemetrystore/chdbtelemetrystore"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes/telemetrytypestest"
"github.com/stretchr/testify/require"
)
@@ -809,44 +807,30 @@ func TestStatementBuilderListQueryBodyMessage(t *testing.T) {
}
}
func buildTestTelemetryMetadataStore(t *testing.T, promotedPaths ...string) *telemetrytypestest.MockMetadataStore {
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
func enableBodyJSONQuery(_ *testing.T) {
querybuilder.BodyJSONQueryEnabled = true
}
types, _ := telemetrytypes.TestJSONTypeSet()
for path, jsonTypes := range types {
promoted := false
split := strings.Split(path, telemetrytypes.ArraySep)
if path == "message" {
promoted = true
} else if slices.Contains(promotedPaths, split[0]) {
promoted = true
}
// Create a TelemetryFieldKey for each JSONDataType for this path
// Since a path can have multiple types, we create one key per type
for _, jsonType := range jsonTypes {
key := &telemetrytypes.TelemetryFieldKey{
Name: path,
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextBody,
FieldDataType: telemetrytypes.MappingJSONDataTypeToFieldDataType[jsonType],
JSONDataType: &jsonType,
Materialized: promoted,
}
err := key.SetJSONAccessPlan(telemetrytypes.JSONColumnMetadata{
BaseColumn: LogsV2BodyJSONColumn,
PromotedColumn: LogsV2BodyPromotedColumn,
}, types)
require.NoError(t, err)
mockMetadataStore.SetKey(key)
}
}
return mockMetadataStore
func disableBodyJSONQuery(_ *testing.T) {
querybuilder.BodyJSONQueryEnabled = false
}
func buildJSONTestStatementBuilder(t *testing.T, promotedPaths ...string) *logQueryStatementBuilder {
mockMetadataStore := buildTestTelemetryMetadataStore(t, promotedPaths...)
t.Helper()
provider, cleanup, err := chdbtelemetrystore.New()
require.NoError(t, err)
t.Cleanup(cleanup)
ctx := context.Background()
types, _ := telemetrytypes.TestJSONTypeSet()
require.NoError(t, provider.SeedBodyJSONPaths(ctx, types))
// "message" is always promoted in these tests.
allPromoted := append([]string{"message"}, promotedPaths...)
require.NoError(t, provider.SeedPromotedPaths(ctx, allPromoted...))
metadataStore := chdbtelemetrystore.NewChdbMetadataStore(provider)
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
@@ -855,14 +839,14 @@ func buildJSONTestStatementBuilder(t *testing.T, promotedPaths ...string) *logQu
instrumentationtest.New().ToProviderSettings(),
fm,
cb,
mockMetadataStore,
metadataStore,
DefaultFullTextColumn,
GetBodyJSONKey,
)
statementBuilder := NewLogQueryStatementBuilder(
return NewLogQueryStatementBuilder(
instrumentationtest.New().ToProviderSettings(),
mockMetadataStore,
metadataStore,
fm,
cb,
resourceFilterStmtBuilder,
@@ -870,27 +854,4 @@ func buildJSONTestStatementBuilder(t *testing.T, promotedPaths ...string) *logQu
DefaultFullTextColumn,
GetBodyJSONKey,
)
return statementBuilder
}
func testAddIndexedPaths(t *testing.T, statementBuilder *logQueryStatementBuilder, telemetryFieldKeys ...*telemetrytypes.TelemetryFieldKey) {
mockMetadataStore := statementBuilder.metadataStore.(*telemetrytypestest.MockMetadataStore)
for _, key := range telemetryFieldKeys {
if strings.Contains(key.Name, telemetrytypes.ArraySep) || strings.Contains(key.Name, telemetrytypes.ArrayAnyIndex) {
t.Fatalf("array paths are not supported: %s", key.Name)
}
for _, storedKey := range mockMetadataStore.KeysMap[key.Name] {
storedKey.Indexes = append(storedKey.Indexes, key.Indexes...)
}
}
}
func enableBodyJSONQuery(_ *testing.T) {
querybuilder.BodyJSONQueryEnabled = true
}
func disableBodyJSONQuery(_ *testing.T) {
querybuilder.BodyJSONQueryEnabled = false
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/SigNoz/signoz-otel-collector/constants"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -252,13 +251,13 @@ func (t *telemetryMetaStore) getJSONPathIndexes(ctx context.Context, paths ...st
return cleanIndexes, nil
}
func buildListLogsJSONIndexesQuery(cluster string, filters ...string) (string, []any) {
func buildListLogsJSONIndexesQuery(cluster, logsDBName, logsV2LocalTblName string, filters ...string) (string, []any) {
sb := sqlbuilder.Select(
"name", "type_full", "expr", "granularity",
).From(fmt.Sprintf("clusterAllReplicas('%s', %s)", cluster, SkipIndexTableName))
sb.Where(sb.Equal("database", telemetrylogs.DBName))
sb.Where(sb.Equal("table", telemetrylogs.LogsV2LocalTableName))
sb.Where(sb.Equal("database", logsDBName))
sb.Where(sb.Equal("table", logsV2LocalTblName))
sb.Where(sb.Or(
sb.ILike("expr", fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyV2ColumnPrefix))),
sb.ILike("expr", fmt.Sprintf("%%%s%%", querybuilder.FormatValueForContains(constants.BodyPromotedColumnPrefix))),
@@ -275,7 +274,7 @@ func buildListLogsJSONIndexesQuery(cluster string, filters ...string) (string, [
func (t *telemetryMetaStore) ListLogsJSONIndexes(ctx context.Context, filters ...string) (map[string][]schemamigrator.Index, error) {
ctx = withTelemetryContext(ctx, "ListLogsJSONIndexes")
query, args := buildListLogsJSONIndexesQuery(t.telemetrystore.Cluster(), filters...)
query, args := buildListLogsJSONIndexesQuery(t.telemetrystore.Cluster(), t.logsDBName, logsV2LocalTableName, filters...)
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
if err != nil {
return nil, errors.WrapInternalf(err, CodeFailLoadLogsJSONIndexes, "failed to load string indexed columns")
@@ -317,12 +316,12 @@ func (t *telemetryMetaStore) ListJSONValues(ctx context.Context, path string, li
}
if promoted {
path = telemetrylogs.BodyPromotedColumnPrefix + path
path = constants.BodyPromotedColumnPrefix + path
} else {
path = telemetrylogs.BodyJSONColumnPrefix + path
path = constants.BodyV2ColumnPrefix + path
}
from := fmt.Sprintf("%s.%s", telemetrylogs.DBName, telemetrylogs.LogsV2TableName)
from := fmt.Sprintf("%s.%s", t.logsDBName, t.logsV2TblName)
colExpr := func(typ telemetrytypes.JSONDataType) string {
return fmt.Sprintf("dynamicElement(%s, '%s')", path, typ.StringValue())
}
@@ -471,7 +470,7 @@ func (t *telemetryMetaStore) IsPathPromoted(ctx context.Context, path string) (b
split := strings.Split(path, telemetrytypes.ArraySep)
pathSegment := split[0]
query := fmt.Sprintf("SELECT 1 FROM %s.%s WHERE signal = ? AND column_name = ? AND field_context = ? AND field_name = ? LIMIT 1", DBName, PromotedPathsTableName)
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, telemetrytypes.SignalLogs, telemetrylogs.LogsV2BodyPromotedColumn, telemetrytypes.FieldContextBody, pathSegment)
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, telemetrytypes.SignalLogs, constants.BodyPromotedColumn, telemetrytypes.FieldContextBody, pathSegment)
if err != nil {
return false, errors.WrapInternalf(err, CodeFailCheckPathPromoted, "failed to check if path %s is promoted", path)
}
@@ -486,7 +485,7 @@ func (t *telemetryMetaStore) GetPromotedPaths(ctx context.Context, paths ...stri
sb := sqlbuilder.Select("field_name").From(fmt.Sprintf("%s.%s", DBName, PromotedPathsTableName))
conditions := []string{
sb.Equal("signal", telemetrytypes.SignalLogs),
sb.Equal("column_name", telemetrylogs.LogsV2BodyPromotedColumn),
sb.Equal("column_name", constants.BodyPromotedColumn),
sb.Equal("field_context", telemetrytypes.FieldContextBody),
sb.NotEqual("field_name", "__all__"),
}
@@ -522,8 +521,8 @@ func (t *telemetryMetaStore) GetPromotedPaths(ctx context.Context, paths ...stri
// TODO(Piyush): Remove this function
func CleanPathPrefixes(path string) string {
path = strings.TrimPrefix(path, telemetrytypes.BodyJSONStringSearchPrefix)
path = strings.TrimPrefix(path, telemetrylogs.BodyJSONColumnPrefix)
path = strings.TrimPrefix(path, telemetrylogs.BodyPromotedColumnPrefix)
path = strings.TrimPrefix(path, constants.BodyV2ColumnPrefix)
path = strings.TrimPrefix(path, constants.BodyPromotedColumnPrefix)
return path
}
@@ -543,7 +542,7 @@ func (t *telemetryMetaStore) PromotePaths(ctx context.Context, paths ...string)
if trimmed == "" {
continue
}
if err := batch.Append(telemetrytypes.SignalLogs, telemetrylogs.LogsV2BodyPromotedColumn, "JSON()", telemetrytypes.FieldContextBody, trimmed, 0, releaseTime); err != nil {
if err := batch.Append(telemetrytypes.SignalLogs, constants.BodyPromotedColumn, "JSON()", telemetrytypes.FieldContextBody, trimmed, 0, releaseTime); err != nil {
_ = batch.Abort()
return errors.WrapInternalf(err, CodeFailedToAppendPath, "failed to append path")
}

View File

@@ -140,7 +140,7 @@ func TestBuildListLogsJSONIndexesQuery(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
query, args := buildListLogsJSONIndexesQuery(tc.cluster, tc.filters...)
query, args := buildListLogsJSONIndexesQuery(tc.cluster, telemetrylogs.DBName, telemetrylogs.LogsV2LocalTableName, tc.filters...)
require.Equal(t, tc.expectedSQL, query)
require.Equal(t, tc.expectedArgs, args)

View File

@@ -0,0 +1,6 @@
package telemetrymetadata
// logsV2LocalTableName is the local (non-distributed) ClickHouse table for logs v2.
// Defined here instead of importing telemetrylogs to avoid an import cycle:
// telemetrylogs tests → chdbtelemetrystoretest → telemetrymetadata → telemetrylogs.
const logsV2LocalTableName = "logs_v2"

View File

@@ -1928,37 +1928,3 @@ func (t *telemetryMetaStore) GetFirstSeenFromMetricMetadata(ctx context.Context,
return result, nil
}
func (t *telemetryMetaStore) FetchLastSeenInfoMulti(ctx context.Context, metricNames ...string) (map[string]int64, error) {
sb := sqlbuilder.Select(
"metric_name",
"max(unix_milli)",
).
From(t.metricsDBName + "." + telemetrymetrics.TimeseriesV4TableName)
sb.Where(sb.In("metric_name", metricNames))
sb.GroupBy("metric_name")
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
t.logger.DebugContext(ctx, "fetching metric last seen timestamp", "query", query, "args", args)
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
if err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to fetch metric last seen info")
}
defer rows.Close()
lastSeenInfo := make(map[string]int64)
for rows.Next() {
var metricName string
var unix_milli int64
if err := rows.Scan(&metricName, &unix_milli); err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "failed to scan last seen info result")
}
lastSeenInfo[metricName] = unix_milli
}
if err := rows.Err(); err != nil {
return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, "error iterating over metrics temporality rows")
}
return lastSeenInfo, nil
}

View File

@@ -0,0 +1,120 @@
//go:build chdb
// Package chdbtelemetrystoretest provides central test builder functions backed by
// an in-process chdb session. These builders are used across multiple signal packages
// to avoid import cycles: telemetrymetadata previously imported telemetrylogs, which
// would create a cycle if telemetrylogs tests tried to use telemetrymetadata.
// With that dependency removed, this package can safely import both.
package chdbtelemetrystoretest
import (
"context"
"fmt"
"strings"
"testing"
"time"
otelcollectorconstants "github.com/SigNoz/signoz-otel-collector/constants"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/telemetrymetadata"
"github.com/SigNoz/signoz/pkg/telemetrystore/chdbtelemetrystore"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/stretchr/testify/require"
)
// Logs table name constants mirroring telemetrylogs — kept here to avoid importing
// that package (which would create a cycle when telemetrylogs tests import this package).
const (
logsDBName = "signoz_logs"
logsV2TblName = "distributed_logs_v2"
logsTagAttrTblName = "distributed_tag_attributes_v2"
logAttrKeysTblName = "distributed_logs_attribute_keys"
logResKeysTblName = "distributed_logs_resource_keys"
)
// NewLogsMetadataStore creates a chdb-backed MetadataStore seeded from the provided
// TelemetryFieldKeys. Body-context keys are inserted into distributed_json_path_types;
// keys with Materialized=true have their root path inserted into the column-evolution
// metadata table so the store treats them as promoted.
// The returned cleanup function must be called (typically via t.Cleanup).
func NewLogsMetadataStore(t *testing.T, keys ...*telemetrytypes.TelemetryFieldKey) (telemetrytypes.MetadataStore, func()) {
t.Helper()
provider, cleanup, err := chdbtelemetrystore.New()
require.NoError(t, err)
ctx := context.Background()
require.NoError(t, seedFromFieldKeys(ctx, provider, keys))
store := telemetrymetadata.NewTelemetryMetaStore(
instrumentationtest.New().ToProviderSettings(),
provider,
"", "", "", "", // traces (unused in logs tests)
"", "", // metrics (unused in logs tests)
"", "", // meter (unused in logs tests)
logsDBName,
logsV2TblName,
logsTagAttrTblName,
logAttrKeysTblName,
logResKeysTblName,
telemetrymetadata.DBName,
telemetrymetadata.AttributesMetadataLocalTableName,
)
return store, cleanup
}
// seedFromFieldKeys inserts body-JSON path/type rows and promoted-path rows derived
// from the given keys into the chdb session backing provider.
func seedFromFieldKeys(ctx context.Context, provider *chdbtelemetrystore.Provider, keys []*telemetrytypes.TelemetryFieldKey) error {
lastSeen := uint64(time.Now().UnixNano())
releaseTime := time.Now().UnixNano()
conn := provider.ClickhouseDB()
promotedPaths := map[string]bool{}
for _, key := range keys {
if key.FieldContext != telemetrytypes.FieldContextBody || key.JSONDataType == nil {
continue
}
// Insert into distributed_json_path_types
query := fmt.Sprintf(
"INSERT INTO %s.%s (%s, %s, %s) VALUES (?, ?, ?)",
otelcollectorconstants.SignozMetadataDB,
otelcollectorconstants.DistributedPathTypesTable,
otelcollectorconstants.PathTypesTablePathColumn,
otelcollectorconstants.PathTypesTableTypeColumn,
otelcollectorconstants.PathTypesTableLastSeenColumn,
)
if err := conn.Exec(ctx, query, key.Name, key.JSONDataType.StringValue(), lastSeen); err != nil {
return fmt.Errorf("seedFromFieldKeys: insert path %s/%s: %w", key.Name, key.JSONDataType.StringValue(), err)
}
if key.Materialized {
rootPath := strings.Split(key.Name, telemetrytypes.ArraySep)[0]
promotedPaths[rootPath] = true
}
}
for path := range promotedPaths {
query := fmt.Sprintf(
"INSERT INTO %s.%s (signal, column_name, column_type, field_context, field_name, version, release_time) VALUES (?, ?, ?, ?, ?, ?, ?)",
telemetrymetadata.DBName,
telemetrymetadata.PromotedPathsTableName,
)
if err := conn.Exec(ctx, query,
telemetrytypes.SignalLogs,
otelcollectorconstants.BodyPromotedColumn,
"JSON()",
telemetrytypes.FieldContextBody,
path,
0,
releaseTime,
); err != nil {
return fmt.Errorf("seedFromFieldKeys: insert promoted path %s: %w", path, err)
}
}
return nil
}
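A sketch of how a signal-package test might consume this builder; the field values and the JSONDataTypeString constant name are illustrative assumptions, not taken from telemetrytypes.
func TestWithChdbBackedStore(t *testing.T) {
	jsonType := telemetrytypes.JSONDataTypeString // assumed constant name
	store, cleanup := chdbtelemetrystoretest.NewLogsMetadataStore(t,
		&telemetrytypes.TelemetryFieldKey{
			Name:         "message",
			Signal:       telemetrytypes.SignalLogs,
			FieldContext: telemetrytypes.FieldContextBody,
			JSONDataType: &jsonType,
			Materialized: true, // root path is seeded as promoted
		},
	)
	t.Cleanup(cleanup)
	_ = store // wire into a statement builder, as buildJSONTestStatementBuilder does above
}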

View File

@@ -0,0 +1,51 @@
//go:build !chdb
package chdbtelemetrystoretest
import (
"strings"
"testing"
otelcollectorconstants "github.com/SigNoz/signoz-otel-collector/constants"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes/telemetrytypestest"
)
// NewLogsMetadataStore returns a MockMetadataStore populated from the provided
// TelemetryFieldKeys. A type-cache is built from the keys so that SetJSONAccessPlan
// can be resolved for each body-context key before it is stored.
// The returned cleanup function is a no-op (nothing to tear down for an in-memory store).
func NewLogsMetadataStore(t *testing.T, keys ...*telemetrytypes.TelemetryFieldKey) (telemetrytypes.MetadataStore, func()) {
t.Helper()
mockStore := telemetrytypestest.NewMockMetadataStore()
// Build type-cache from the incoming keys so SetJSONAccessPlan can resolve
// parent-path array types (used by nested / array paths).
typeCache := make(map[string][]telemetrytypes.JSONDataType)
for _, key := range keys {
if key.JSONDataType != nil {
typeCache[key.Name] = append(typeCache[key.Name], *key.JSONDataType)
}
}
for _, key := range keys {
if key.FieldContext == telemetrytypes.FieldContextBody && key.JSONDataType != nil {
if err := key.SetJSONAccessPlan(telemetrytypes.JSONColumnMetadata{
BaseColumn: otelcollectorconstants.BodyV2Column,
PromotedColumn: otelcollectorconstants.BodyPromotedColumn,
}, typeCache); err != nil {
t.Fatalf("NewLogsMetadataStore: SetJSONAccessPlan for %q: %v", key.Name, err)
}
}
if key.Materialized {
rootPath := strings.Split(key.Name, telemetrytypes.ArraySep)[0]
mockStore.PromotedPathsMap[rootPath] = true
}
mockStore.SetKey(key)
}
return mockStore, func() {}
}

View File

@@ -0,0 +1,140 @@
//go:build chdb
package chdbtelemetrystore
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
chdb "github.com/chdb-io/chdb-go/chdb"
"github.com/huandu/go-sqlbuilder"
)
// clusterAllReplicasRe matches clusterAllReplicas('<cluster>', <table>) and captures
// the table expression so we can rewrite it for chdb's single-node context.
var clusterAllReplicasRe = regexp.MustCompile(`(?i)clusterAllReplicas\('[^']*',\s*([^)]+)\)`)
// rewriteClusterAllReplicas strips the clusterAllReplicas wrapper from a query,
// replacing it with a direct table reference. This lets single-node chdb sessions
// execute queries originally written for a multi-node ClickHouse cluster.
func rewriteClusterAllReplicas(query string) string {
return clusterAllReplicasRe.ReplaceAllStringFunc(query, func(match string) string {
sub := clusterAllReplicasRe.FindStringSubmatch(match)
if len(sub) < 2 {
return match
}
return strings.TrimSpace(sub[1])
})
}
// interpolateArgs substitutes ? placeholders in query using the ClickHouse SQL flavor
// from go-sqlbuilder — the same mechanism used by chdb's own database/sql driver.
func interpolateArgs(query string, args []any) (string, error) {
if len(args) == 0 {
return query, nil
}
return sqlbuilder.ClickHouse.Interpolate(query, args)
}
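A small sketch (the query is illustrative; assumes fmt is imported) showing both transformations that Exec, Select, and Query apply before handing SQL to chdb:
func exampleCompileQuery() {
	q := "SELECT name FROM clusterAllReplicas('cluster', system.tables) WHERE database = ?"
	q = rewriteClusterAllReplicas(q)
	// q: SELECT name FROM system.tables WHERE database = ?
	compiled, err := interpolateArgs(q, []any{"signoz_logs"})
	if err != nil {
		panic(err)
	}
	fmt.Println(compiled) // SELECT name FROM system.tables WHERE database = 'signoz_logs'
}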
// chdbConn wraps a chdb Session and exposes it as a clickhouse.Conn.
// Exec, Select, Query, and QueryRow execute queries for real via chdb.
// The remaining interface methods are lightweight stubs sufficient for testing.
type chdbConn struct {
session *chdb.Session
}
var _ clickhouse.Conn = (*chdbConn)(nil)
func (c *chdbConn) Contributors() []string { return nil }
func (c *chdbConn) ServerVersion() (*driver.ServerVersion, error) {
return &driver.ServerVersion{DisplayName: "chdb"}, nil
}
func (c *chdbConn) Ping(_ context.Context) error { return nil }
func (c *chdbConn) Stats() driver.Stats { return driver.Stats{} }
func (c *chdbConn) Close() error {
c.session.Close()
return nil
}
func (c *chdbConn) AsyncInsert(ctx context.Context, query string, _ bool, args ...any) error {
return c.Exec(ctx, query, args...)
}
func (c *chdbConn) PrepareBatch(_ context.Context, _ string, _ ...driver.PrepareBatchOption) (driver.Batch, error) {
return nil, fmt.Errorf("chdbConn: PrepareBatch not implemented")
}
// Exec executes a DDL or DML statement (CREATE TABLE, INSERT, DROP, …) via chdb.
// Any result set is discarded; only errors are surfaced.
func (c *chdbConn) Exec(_ context.Context, query string, args ...any) error {
query = rewriteClusterAllReplicas(query)
compiled, err := interpolateArgs(query, args)
if err != nil {
return fmt.Errorf("chdbConn: Exec: interpolate args: %w", err)
}
result, err := c.session.Query(compiled, "CSV")
if err != nil {
return fmt.Errorf("chdbConn: Exec: %w", err)
}
defer result.Free()
return result.Error()
}
// Select executes query and scans all result rows into dest.
// dest must be a pointer to a slice of structs or maps.
//
// Struct fields are matched to ClickHouse columns using the following priority:
// 1. `ch:"<column>"` struct tag
// 2. `json:"<column>"` struct tag
// 3. Lowercased field name
func (c *chdbConn) Select(_ context.Context, dest any, query string, args ...any) error {
query = rewriteClusterAllReplicas(query)
compiled, err := interpolateArgs(query, args)
if err != nil {
return fmt.Errorf("chdbConn: Select: interpolate args: %w", err)
}
result, err := c.session.Query(compiled, "JSONCompact")
if err != nil {
return fmt.Errorf("chdbConn: Select: %w", err)
}
defer result.Free()
if err := result.Error(); err != nil {
return err
}
return scanJSONCompactIntoSlice(result.String(), dest)
}
// Query executes query and returns a Rows iterator.
func (c *chdbConn) Query(_ context.Context, query string, args ...any) (driver.Rows, error) {
query = rewriteClusterAllReplicas(query)
compiled, err := interpolateArgs(query, args)
if err != nil {
return nil, fmt.Errorf("chdbConn: Query: interpolate args: %w", err)
}
result, err := c.session.Query(compiled, "JSONCompact")
if err != nil {
return nil, fmt.Errorf("chdbConn: Query: %w", err)
}
if err := result.Error(); err != nil {
result.Free()
return nil, err
}
return newChdbRows(result)
}
// QueryRow executes query and returns a single Row.
func (c *chdbConn) QueryRow(ctx context.Context, query string, args ...any) driver.Row {
rows, err := c.Query(ctx, query, args...)
if err != nil {
return &chdbRow{err: err}
}
return &chdbRow{rows: rows.(*chdbRows)}
}

View File

@@ -0,0 +1,177 @@
//go:build chdb
package chdbtelemetrystore
import (
"encoding/json"
"fmt"
"reflect"
"strings"
)
// jsonCompactResult is the top-level structure of ClickHouse's JSONCompact output format.
type jsonCompactResult struct {
Meta []jsonMeta `json:"meta"`
Data [][]json.RawMessage `json:"data"`
}
type jsonMeta struct {
Name string `json:"name"`
Type string `json:"type"`
}
// scanJSONCompactIntoSlice parses a JSONCompact response and appends rows into dest
// (must be a pointer to a slice of structs or maps).
func scanJSONCompactIntoSlice(jsonStr string, dest any) error {
if strings.TrimSpace(jsonStr) == "" {
return nil
}
var jr jsonCompactResult
if err := json.Unmarshal([]byte(jsonStr), &jr); err != nil {
return fmt.Errorf("chdbConn: Select: parse response: %w", err)
}
destVal := reflect.ValueOf(dest)
if destVal.Kind() != reflect.Ptr || destVal.Elem().Kind() != reflect.Slice {
return fmt.Errorf("chdbConn: Select: dest must be a pointer to a slice, got %T", dest)
}
sliceVal := destVal.Elem()
elemType := sliceVal.Type().Elem()
for _, row := range jr.Data {
elem := reflect.New(elemType).Elem()
if err := scanRowIntoValue(jr.Meta, row, elem); err != nil {
return err
}
sliceVal.Set(reflect.Append(sliceVal, elem))
}
return nil
}
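An in-package sketch of the scan path; the JSONCompact payload below is hand-written to mirror ClickHouse's format (with 64-bit integers unquoted), not captured output:
func exampleScanJSONCompact() {
	payload := `{"meta":[{"name":"name","type":"String"},{"name":"count","type":"UInt64"}],"data":[["logs_v2",42]]}`
	var rows []struct {
		Name  string `ch:"name"`
		Count uint64 `ch:"count"`
	}
	if err := scanJSONCompactIntoSlice(payload, &rows); err != nil {
		panic(err)
	}
	// rows[0] == {Name: "logs_v2", Count: 42}
}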
// scanRowIntoValue fills a struct or map Value from a single JSONCompact data row.
func scanRowIntoValue(meta []jsonMeta, row []json.RawMessage, elem reflect.Value) error {
switch elem.Kind() {
case reflect.Struct:
for i, m := range meta {
if i >= len(row) {
break
}
field := findStructField(elem, m.Name)
if !field.IsValid() {
continue
}
if err := unmarshalIntoField(row[i], field); err != nil {
return fmt.Errorf("column %q: %w", m.Name, err)
}
}
case reflect.Map:
if elem.IsNil() {
elem.Set(reflect.MakeMap(elem.Type()))
}
for i, m := range meta {
if i >= len(row) {
break
}
var v any
if err := json.Unmarshal(row[i], &v); err != nil {
return err
}
elem.SetMapIndex(reflect.ValueOf(m.Name), reflect.ValueOf(v))
}
default:
return fmt.Errorf("chdbConn: Select: unsupported element kind %s", elem.Kind())
}
return nil
}
// findStructField returns the reflect.Value of the struct field corresponding to colName.
// Priority: `ch` tag → `json` tag → lowercased field name.
func findStructField(structVal reflect.Value, colName string) reflect.Value {
t := structVal.Type()
colLower := strings.ToLower(colName)
for i := range t.NumField() {
f := t.Field(i)
if tag, _, _ := strings.Cut(f.Tag.Get("ch"), ","); tag == colName {
return structVal.Field(i)
}
if tag, _, _ := strings.Cut(f.Tag.Get("json"), ","); tag == colName {
return structVal.Field(i)
}
if strings.ToLower(f.Name) == colLower {
return structVal.Field(i)
}
}
return reflect.Value{}
}
// unmarshalIntoField deserializes raw JSON into field, performing numeric conversions
// needed for ClickHouse integer types (UInt64, Int64, …).
func unmarshalIntoField(raw json.RawMessage, field reflect.Value) error {
dec := json.NewDecoder(strings.NewReader(string(raw)))
dec.UseNumber()
var v any
if err := dec.Decode(&v); err != nil {
return err
}
return assignToField(field, v)
}
// assignToField converts src (from json.Decoder with UseNumber) and assigns it to field.
func assignToField(field reflect.Value, src any) error {
if src == nil {
field.Set(reflect.Zero(field.Type()))
return nil
}
if num, ok := src.(json.Number); ok {
switch field.Kind() {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
n, err := num.Int64()
if err != nil {
return err
}
field.SetUint(uint64(n))
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := num.Int64()
if err != nil {
return err
}
field.SetInt(n)
return nil
case reflect.Float32, reflect.Float64:
n, err := num.Float64()
if err != nil {
return err
}
field.SetFloat(n)
return nil
case reflect.String:
field.SetString(num.String())
return nil
}
}
// Handle []interface{} → []T conversions (ClickHouse arrays decoded from JSON).
if srcSlice, ok := src.([]interface{}); ok && field.Kind() == reflect.Slice {
result := reflect.MakeSlice(field.Type(), len(srcSlice), len(srcSlice))
for i, item := range srcSlice {
if err := assignToField(result.Index(i), item); err != nil {
return fmt.Errorf("slice element %d: %w", i, err)
}
}
field.Set(result)
return nil
}
srcVal := reflect.ValueOf(src)
if srcVal.Type().AssignableTo(field.Type()) {
field.Set(srcVal)
return nil
}
if srcVal.Type().ConvertibleTo(field.Type()) {
field.Set(srcVal.Convert(field.Type()))
return nil
}
return fmt.Errorf("cannot assign %T to %s", src, field.Type())
}

View File

@@ -0,0 +1,109 @@
//go:build chdb
package chdbtelemetrystore
import (
"fmt"
schemamigrator "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
chdb "github.com/chdb-io/chdb-go/chdb"
)
// runMigrations applies the full signoz-otel-collector logs schema against the given
// chdb session. It mirrors the same migration set that the collector runs on a real
// ClickHouse cluster (CustomRetentionLogsMigrations + LogsMigrationsV2), with the
// following chdb-specific adaptations:
//
// - CREATE DATABASE statements are prepended so the tables have a home.
// - Distributed engine tables are replaced with MergeTree ORDER BY tuple() so
// every "distributed_*" table is a real, writable table in single-node chdb.
// - Operations that don't make sense without a cluster (TTL materialisation,
// MATERIALIZE COLUMN, MODIFY SETTINGS with serialisation keys) are skipped.
func runMigrations(session *chdb.Session) error {
// Ensure databases exist before any table DDL.
for _, db := range []string{"signoz_logs", "signoz_metadata"} {
if err := execSQL(session, fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", db)); err != nil {
return fmt.Errorf("create database %s: %w", db, err)
}
}
migrationSets := [][]schemamigrator.SchemaMigrationRecord{
schemamigrator.CustomRetentionLogsMigrations,
schemamigrator.MetadataMigrations,
schemamigrator.LogsMigrationsV2,
}
for _, set := range migrationSets {
for _, record := range set {
for _, op := range record.UpItems {
sql, skip := toChdbSQL(op)
if skip {
continue
}
if err := execSQL(session, sql); err != nil {
return fmt.Errorf("migration %d: %w", record.MigrationID, err)
}
}
}
}
return nil
}
// toChdbSQL converts a schemamigrator.Operation to a chdb-compatible SQL string.
// Returns (sql, skip=true) for operations that should be omitted in a single-node
// chdb context.
func toChdbSQL(op schemamigrator.Operation) (sql string, skip bool) {
switch o := op.(type) {
case schemamigrator.CreateTableOperation:
return adaptCreateTable(o), false
case schemamigrator.DropTableOperation:
// Idempotent; safe to run even if the table never existed.
return o.ToSQL(), false
case schemamigrator.AlterTableAddColumn,
schemamigrator.AlterTableAddIndex,
schemamigrator.AlterTableDropColumn,
schemamigrator.AlterTableDropIndex:
return o.ToSQL(), false
// TTL is a production data-retention concern; irrelevant for test sessions.
case schemamigrator.AlterTableModifyTTL,
schemamigrator.AlterTableDropTTL,
// Background mutation; not needed in ephemeral test tables.
schemamigrator.AlterTableMaterializeColumn,
// Includes serialisation settings (object_serialization_version, …) that
// may not be recognised by the embedded chdb build.
schemamigrator.AlterTableModifySettings,
// Materialized views are not required for query-generation tests.
schemamigrator.CreateMaterializedViewOperation:
return "", true
default:
// Unknown operation type — skip conservatively.
return "", true
}
}
// adaptCreateTable rewrites a CreateTableOperation for chdb:
// - If the engine is Distributed, it is replaced with a plain MergeTree so the
// "distributed_*" table is a real, directly-writable table on the single chdb
// node. This preserves the exact column list while dropping distribution.
// - All other engines (MergeTree, ReplacingMergeTree, …) are used as-is.
func adaptCreateTable(op schemamigrator.CreateTableOperation) string {
if op.Engine.EngineType() == "Distributed" {
op.Engine = schemamigrator.MergeTree{OrderBy: "tuple()"}
}
return op.ToSQL()
}
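To make the engine rewrite concrete, a hand-written before/after pair (illustrative DDL with the column list elided; not captured migrator output):
// As emitted for a real cluster deployment:
//   CREATE TABLE signoz_logs.distributed_logs_v2 ( /* columns */ )
//   ENGINE = Distributed('cluster', 'signoz_logs', 'logs_v2')
// After adaptCreateTable, in single-node chdb:
//   CREATE TABLE signoz_logs.distributed_logs_v2 ( /* columns */ )
//   ENGINE = MergeTree ORDER BY tuple()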
// execSQL runs a single SQL statement against the session and returns any error.
func execSQL(session *chdb.Session, sql string) error {
result, err := session.Query(sql, "CSV")
if err != nil {
return err
}
defer result.Free()
return result.Error()
}

View File

@@ -0,0 +1,63 @@
//go:build chdb
package chdbtelemetrystore
import (
"fmt"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/SigNoz/signoz/pkg/telemetrystore"
chdb "github.com/chdb-io/chdb-go/chdb"
)
// Provider implements TelemetryStore using chdb-go for in-process ClickHouse execution.
//
// Unlike the mock-based provider (which uses go-sqlmock and requires pre-registered
// expectations), this provider actually executes SQL against an embedded ClickHouse engine.
// This makes it suitable for integration-style tests that need real query execution
// without an external ClickHouse server.
//
// # Session lifecycle
//
// chdb-go maintains a package-level singleton session. Creating multiple Provider
// instances in the same process shares the same underlying session, meaning DDL
// (CREATE TABLE, DROP TABLE, INSERT) issued by one consumer is visible to others.
// To maintain test isolation, use unique database or table names and call the cleanup
// function returned by New via t.Cleanup.
type Provider struct {
conn *chdbConn
cluster string
}
var _ telemetrystore.TelemetryStore = (*Provider)(nil)
// New creates a Provider backed by an in-process chdb session and runs the full
// signoz-otel-collector logs schema migrations so the tables are ready for use.
// The returned cleanup function closes the session and should be wired in via t.Cleanup.
func New() (*Provider, func(), error) {
session, err := chdb.NewSession()
if err != nil {
return nil, nil, fmt.Errorf("chdbtelemetrystore: failed to create session: %w", err)
}
if err := runMigrations(session); err != nil {
session.Close()
return nil, nil, fmt.Errorf("chdbtelemetrystore: schema migration failed: %w", err)
}
cleanup := func() { session.Close() }
return &Provider{
conn: &chdbConn{session: session},
cluster: "local",
}, cleanup, nil
}
// ClickhouseDB returns the chdb-backed clickhouse.Conn.
func (p *Provider) ClickhouseDB() clickhouse.Conn {
return p.conn
}
// Cluster returns the cluster name for this provider.
func (p *Provider) Cluster() string {
return p.cluster
}
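A sketch of typical wiring in a test (assumes the context, testing, and require imports; the query is a trivial smoke check):
func TestProviderSmoke(t *testing.T) {
	provider, cleanup, err := New()
	require.NoError(t, err)
	t.Cleanup(cleanup)
	var one uint8
	row := provider.ClickhouseDB().QueryRow(context.Background(), "SELECT 1")
	require.NoError(t, row.Scan(&one))
	require.Equal(t, uint8(1), one)
}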

View File

@@ -0,0 +1,144 @@
//go:build chdb
package chdbtelemetrystore
import (
"encoding/json"
"fmt"
"reflect"
"strings"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
chdbpurego "github.com/chdb-io/chdb-go/chdb-purego"
)
// chdbRows implements clickhouse/v2/lib/driver.Rows over a parsed JSONCompact response.
type chdbRows struct {
meta []jsonMeta
data [][]json.RawMessage
cursor int
result chdbpurego.ChdbResult // held so we can Free() on Close
}
func newChdbRows(result chdbpurego.ChdbResult) (*chdbRows, error) {
str := result.String()
if strings.TrimSpace(str) == "" {
return &chdbRows{result: result, cursor: -1}, nil
}
var jr jsonCompactResult
if err := json.Unmarshal([]byte(str), &jr); err != nil {
return nil, fmt.Errorf("chdbRows: parse response: %w", err)
}
return &chdbRows{
meta: jr.Meta,
data: jr.Data,
cursor: -1,
result: result,
}, nil
}
func (r *chdbRows) Next() bool {
r.cursor++
return r.cursor < len(r.data)
}
// Scan copies the current row's columns into dest (positional pointer arguments).
func (r *chdbRows) Scan(dest ...any) error {
if r.cursor < 0 || r.cursor >= len(r.data) {
return fmt.Errorf("chdbRows: Scan called outside a valid row")
}
row := r.data[r.cursor]
for i, d := range dest {
if i >= len(row) {
break
}
dv := reflect.ValueOf(d)
if dv.Kind() != reflect.Ptr {
return fmt.Errorf("chdbRows: Scan dest[%d] must be a pointer", i)
}
if err := unmarshalIntoField(row[i], dv.Elem()); err != nil {
return fmt.Errorf("chdbRows: Scan col %d: %w", i, err)
}
}
return nil
}
// ScanStruct fills a struct from the current row using the same tag-based field
// matching as Select.
func (r *chdbRows) ScanStruct(dest any) error {
if r.cursor < 0 || r.cursor >= len(r.data) {
return fmt.Errorf("chdbRows: ScanStruct called outside a valid row")
}
elem := reflect.ValueOf(dest)
if elem.Kind() == reflect.Ptr {
elem = elem.Elem()
}
return scanRowIntoValue(r.meta, r.data[r.cursor], elem)
}
func (r *chdbRows) ColumnTypes() []driver.ColumnType {
types := make([]driver.ColumnType, len(r.meta))
for i, m := range r.meta {
types[i] = &chdbColumnType{name: m.Name, dbType: m.Type}
}
return types
}
func (r *chdbRows) Totals(_ ...any) error { return nil }
func (r *chdbRows) Columns() []string {
cols := make([]string, len(r.meta))
for i, m := range r.meta {
cols[i] = m.Name
}
return cols
}
func (r *chdbRows) Close() error {
if r.result != nil {
r.result.Free()
r.result = nil
}
return nil
}
func (r *chdbRows) Err() error { return nil }
// chdbRow wraps chdbRows and exposes the first row as clickhouse/v2/lib/driver.Row.
type chdbRow struct {
err error
rows *chdbRows
}
func (r *chdbRow) Err() error { return r.err }
func (r *chdbRow) Scan(dest ...any) error {
if r.err != nil {
return r.err
}
if !r.rows.Next() {
return fmt.Errorf("chdb: no rows in result set")
}
return r.rows.Scan(dest...)
}
func (r *chdbRow) ScanStruct(dest any) error {
if r.err != nil {
return r.err
}
if !r.rows.Next() {
return fmt.Errorf("chdb: no rows in result set")
}
return r.rows.ScanStruct(dest)
}
// chdbColumnType implements driver.ColumnType for chdb result metadata.
type chdbColumnType struct {
name string
dbType string
}
func (c *chdbColumnType) Name() string { return c.name }
func (c *chdbColumnType) Nullable() bool { return strings.HasPrefix(c.dbType, "Nullable") }
func (c *chdbColumnType) ScanType() reflect.Type { return reflect.TypeOf("") }
func (c *chdbColumnType) DatabaseTypeName() string { return c.dbType }
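A small in-package sketch of iterating a result set through the driver.Rows implementation above (system.columns is a real ClickHouse system table, but the snippet is illustrative):
func exampleIterateRows(conn *chdbConn) error {
	rows, err := conn.Query(context.Background(), "SELECT name, type FROM system.columns LIMIT 3")
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var name, colType string
		if err := rows.Scan(&name, &colType); err != nil {
			return err
		}
		fmt.Println(name, colType)
	}
	return rows.Err()
}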

View File

@@ -1,43 +0,0 @@
package cloudintegrationtypes
import (
"time"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Account struct {
types.Identifiable
types.TimeAuditable
ProviderAccountId *string `json:"providerAccountID,omitempty"`
Provider CloudProviderType `json:"provider"`
RemovedAt *time.Time `json:"removedAt,omitempty"`
AgentReport *AgentReport `json:"agentReport,omitempty"`
OrgID valuer.UUID `json:"orgID"`
Config *AccountConfig `json:"config,omitempty"`
}
// AgentReport represents heartbeats sent by the agent.
type AgentReport struct {
TimestampMillis int64 `json:"timestampMillis"`
Data map[string]any `json:"data"`
}
type GettableAccounts struct {
Accounts []*Account `json:"accounts"`
}
type GettableAccount = Account
type UpdatableAccount struct {
Config *AccountConfig `json:"config"`
}
type AccountConfig struct {
AWS *AWSAccountConfig `json:"aws,omitempty"`
}
type AWSAccountConfig struct {
Regions []string `json:"regions"`
}

View File

@@ -1,82 +0,0 @@
package cloudintegrationtypes
import (
"database/sql/driver"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/uptrace/bun"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
ErrCodeCloudIntegrationNotFound = errors.MustNewCode("cloud_integration_not_found")
ErrCodeCloudIntegrationAlreadyExists = errors.MustNewCode("cloud_integration_already_exists")
ErrCodeCloudIntegrationServiceAlreadyExists = errors.MustNewCode("cloud_integration_service_already_exists")
)
// StorableCloudIntegration represents a cloud integration stored in the database.
// This is also referred to as "Account" in the context of cloud integrations.
type StorableCloudIntegration struct {
bun.BaseModel `bun:"table:cloud_integration"`
types.Identifiable
types.TimeAuditable
Provider CloudProviderType `bun:"provider,type:text"`
Config string `bun:"config,type:text"` // Config is provider-specific data in JSON string format
AccountID *string `bun:"account_id,type:text"`
LastAgentReport *StorableAgentReport `bun:"last_agent_report,type:text"`
RemovedAt *time.Time `bun:"removed_at,type:timestamp,nullzero"`
OrgID valuer.UUID `bun:"org_id,type:text"`
}
// StorableAgentReport represents the last heartbeat and arbitrary data sent by the agent.
// There is currently no use case for the Data field, but it is kept for backward compatibility with the older structure.
type StorableAgentReport struct {
TimestampMillis int64 `json:"timestamp_millis"` // backward compatibility
Data map[string]any `json:"data"`
}
// StorableCloudIntegrationService stores the service config for a cloud integration; the config is cloud-provider-specific.
type StorableCloudIntegrationService struct {
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
types.Identifiable
types.TimeAuditable
Type ServiceID `bun:"type,type:text,notnull"` // The Type field name is kept as is, but it holds a service ID
Config string `bun:"config,type:text"` // Config is cloud provider's service specific data in JSON string format
CloudIntegrationID valuer.UUID `bun:"cloud_integration_id,type:text"`
}
// Scan scans value from DB.
func (r *StorableAgentReport) Scan(src any) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, r)
}
// Value creates value to be stored in DB.
func (r *StorableAgentReport) Value() (driver.Value, error) {
if r == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
}
serialized, err := json.Marshal(r)
if err != nil {
return nil, errors.WrapInternalf(
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
)
}
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytes
return string(serialized), nil
}

View File

@@ -1,41 +0,0 @@
package cloudintegrationtypes
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
)
// CloudProviderType type alias.
type CloudProviderType struct{ valuer.String }
var (
// cloud providers.
CloudProviderTypeAWS = CloudProviderType{valuer.NewString("aws")}
CloudProviderTypeAzure = CloudProviderType{valuer.NewString("azure")}
// errors.
ErrCodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")
AWSIntegrationUserEmail = valuer.MustNewEmail("aws-integration@signoz.io")
AzureIntegrationUserEmail = valuer.MustNewEmail("azure-integration@signoz.io")
)
// CloudIntegrationUserEmails is the list of valid emails for Cloud One Click integrations.
// This is used for validation and restrictions in different contexts across the codebase.
var CloudIntegrationUserEmails = []valuer.Email{
AWSIntegrationUserEmail,
AzureIntegrationUserEmail,
}
// NewCloudProvider returns a new CloudProviderType from a string.
// It validates the input and returns an error if the input is not a valid cloud provider.
func NewCloudProvider(provider string) (CloudProviderType, error) {
switch provider {
case CloudProviderTypeAWS.StringValue():
return CloudProviderTypeAWS, nil
case CloudProviderTypeAzure.StringValue():
return CloudProviderTypeAzure, nil
default:
return CloudProviderType{}, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
}
}
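A quick sketch of the constructor in use (assuming fmt is imported; the output comments are illustrative):
func exampleNewCloudProvider() {
	provider, err := NewCloudProvider("aws")
	fmt.Println(provider.StringValue(), err) // aws <nil>
	_, err = NewCloudProvider("gcp")
	fmt.Println(err != nil) // true: gcp is not a supported provider here
}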

View File

@@ -1,88 +0,0 @@
package cloudintegrationtypes
import "github.com/SigNoz/signoz/pkg/types/integrationtypes"
type ConnectionArtifactRequest struct {
Aws *AWSConnectionArtifactRequest `json:"aws"`
}
type AWSConnectionArtifactRequest struct {
DeploymentRegion string `json:"deploymentRegion"`
Regions []string `json:"regions"`
}
type PostableConnectionArtifact = ConnectionArtifactRequest
type ConnectionArtifact struct {
Aws *AWSConnectionArtifact `json:"aws"`
}
type AWSConnectionArtifact struct {
ConnectionUrl string `json:"connectionURL"`
}
type GettableConnectionArtifact = ConnectionArtifact
type AccountStatus struct {
Id string `json:"id"`
ProviderAccountId *string `json:"providerAccountID,omitempty"`
Status integrationtypes.AccountStatus `json:"status"`
}
type GettableAccountStatus = AccountStatus
type AgentCheckInRequest struct {
// Older backward-compatible fields are mapped to the new fields below:
// CloudIntegrationId string `json:"cloudIntegrationId"`
// AccountId string `json:"accountId"`
// New fields
ProviderAccountId string `json:"providerAccountId"`
CloudAccountId string `json:"cloudAccountId"`
Data map[string]any `json:"data,omitempty"`
}
type PostableAgentCheckInRequest struct {
AgentCheckInRequest
// The following are backward-compatible fields for older running agents,
// which get mapped to the new fields in AgentCheckInRequest.
CloudIntegrationId string `json:"cloud_integration_id"`
CloudAccountId string `json:"cloud_account_id"`
}
type GettableAgentCheckInResponse struct {
AgentCheckInResponse
// For backward compatibility
CloudIntegrationId string `json:"cloud_integration_id"`
AccountId string `json:"account_id"`
}
type AgentCheckInResponse struct {
// Older fields for backward compatibility are mapped to new fields below
// CloudIntegrationId string `json:"cloud_integration_id"`
// AccountId string `json:"account_id"`
// New fields
ProviderAccountId string `json:"providerAccountId"`
CloudAccountId string `json:"cloudAccountId"`
// IntegrationConfig carries the integration data an agent requires
// to start collecting telemetry.
// The JSON key stays snake_case for backward compatibility.
IntegrationConfig *IntegrationConfig `json:"integration_config,omitempty"`
}
type IntegrationConfig struct {
EnabledRegions []string `json:"enabledRegions"` // backward compatible
Telemetry *AWSCollectionStrategy `json:"telemetry,omitempty"` // backward compatible
// new fields
AWS *AWSIntegrationConfig `json:"aws,omitempty"`
}
type AWSIntegrationConfig struct {
EnabledRegions []string `json:"enabledRegions"`
Telemetry *AWSCollectionStrategy `json:"telemetry,omitempty"`
}

View File

@@ -1,103 +0,0 @@
package cloudintegrationtypes
import (
"github.com/SigNoz/signoz/pkg/errors"
)
var (
ErrCodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
ErrCodeMismatchCloudProvider = errors.MustNewCode("cloud_provider_mismatch")
)
// List of all valid cloud regions on Amazon Web Services.
var ValidAWSRegions = map[string]struct{}{
"af-south-1": {}, // Africa (Cape Town).
"ap-east-1": {}, // Asia Pacific (Hong Kong).
"ap-northeast-1": {}, // Asia Pacific (Tokyo).
"ap-northeast-2": {}, // Asia Pacific (Seoul).
"ap-northeast-3": {}, // Asia Pacific (Osaka).
"ap-south-1": {}, // Asia Pacific (Mumbai).
"ap-south-2": {}, // Asia Pacific (Hyderabad).
"ap-southeast-1": {}, // Asia Pacific (Singapore).
"ap-southeast-2": {}, // Asia Pacific (Sydney).
"ap-southeast-3": {}, // Asia Pacific (Jakarta).
"ap-southeast-4": {}, // Asia Pacific (Melbourne).
"ca-central-1": {}, // Canada (Central).
"ca-west-1": {}, // Canada West (Calgary).
"eu-central-1": {}, // Europe (Frankfurt).
"eu-central-2": {}, // Europe (Zurich).
"eu-north-1": {}, // Europe (Stockholm).
"eu-south-1": {}, // Europe (Milan).
"eu-south-2": {}, // Europe (Spain).
"eu-west-1": {}, // Europe (Ireland).
"eu-west-2": {}, // Europe (London).
"eu-west-3": {}, // Europe (Paris).
"il-central-1": {}, // Israel (Tel Aviv).
"me-central-1": {}, // Middle East (UAE).
"me-south-1": {}, // Middle East (Bahrain).
"sa-east-1": {}, // South America (Sao Paulo).
"us-east-1": {}, // US East (N. Virginia).
"us-east-2": {}, // US East (Ohio).
"us-west-1": {}, // US West (N. California).
"us-west-2": {}, // US West (Oregon).
}
// List of all valid cloud regions for Microsoft Azure.
var ValidAzureRegions = map[string]struct{}{
"australiacentral": {}, // Australia Central
"australiacentral2": {}, // Australia Central 2
"australiaeast": {}, // Australia East
"australiasoutheast": {}, // Australia Southeast
"austriaeast": {}, // Austria East
"belgiumcentral": {}, // Belgium Central
"brazilsouth": {}, // Brazil South
"brazilsoutheast": {}, // Brazil Southeast
"canadacentral": {}, // Canada Central
"canadaeast": {}, // Canada East
"centralindia": {}, // Central India
"centralus": {}, // Central US
"chilecentral": {}, // Chile Central
"denmarkeast": {}, // Denmark East
"eastasia": {}, // East Asia
"eastus": {}, // East US
"eastus2": {}, // East US 2
"francecentral": {}, // France Central
"francesouth": {}, // France South
"germanynorth": {}, // Germany North
"germanywestcentral": {}, // Germany West Central
"indonesiacentral": {}, // Indonesia Central
"israelcentral": {}, // Israel Central
"italynorth": {}, // Italy North
"japaneast": {}, // Japan East
"japanwest": {}, // Japan West
"koreacentral": {}, // Korea Central
"koreasouth": {}, // Korea South
"malaysiawest": {}, // Malaysia West
"mexicocentral": {}, // Mexico Central
"newzealandnorth": {}, // New Zealand North
"northcentralus": {}, // North Central US
"northeurope": {}, // North Europe
"norwayeast": {}, // Norway East
"norwaywest": {}, // Norway West
"polandcentral": {}, // Poland Central
"qatarcentral": {}, // Qatar Central
"southafricanorth": {}, // South Africa North
"southafricawest": {}, // South Africa West
"southcentralus": {}, // South Central US
"southindia": {}, // South India
"southeastasia": {}, // Southeast Asia
"spaincentral": {}, // Spain Central
"swedencentral": {}, // Sweden Central
"switzerlandnorth": {}, // Switzerland North
"switzerlandwest": {}, // Switzerland West
"uaecentral": {}, // UAE Central
"uaenorth": {}, // UAE North
"uksouth": {}, // UK South
"ukwest": {}, // UK West
"westcentralus": {}, // West Central US
"westeurope": {}, // West Europe
"westindia": {}, // West India
"westus": {}, // West US
"westus2": {}, // West US 2
"westus3": {}, // West US 3
}
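
These lookup maps made region validation a constant-time membership test. A minimal in-package sketch of such a check (the helper name is hypothetical):

// validateAWSRegion (hypothetical) rejects a user-supplied region that is not
// in ValidAWSRegions, using the package's invalid-region error code.
func validateAWSRegion(region string) error {
	if _, ok := ValidAWSRegions[region]; !ok {
		return errors.NewInvalidInputf(ErrCodeInvalidCloudRegion, "invalid AWS region: %s", region)
	}
	return nil
}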

View File

@@ -1,248 +0,0 @@
package cloudintegrationtypes
import (
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
S3Sync = valuer.NewString("s3sync")
// ErrCodeInvalidServiceID is the error code for invalid service id.
ErrCodeInvalidServiceID = errors.MustNewCode("invalid_service_id")
)
type ServiceID struct{ valuer.String }
type CloudIntegrationService struct {
types.Identifiable
types.TimeAuditable
Type ServiceID `json:"type"`
Config *ServiceConfig `json:"config"`
CloudIntegrationID valuer.UUID `json:"cloudIntegrationID"`
}
// ServiceMetadata helps to quickly list the available services and whether each one is enabled.
// Fetching the complete service definition is a heavy operation with a large response,
// so loading full definitions would make the initial integration page very slow.
type ServiceMetadata struct {
ServiceDefinitionMetadata
// if the service is enabled for the account
Enabled bool `json:"enabled"`
}
type GettableServicesMetadata struct {
Services []*ServiceMetadata `json:"services"`
}
type Service struct {
ServiceDefinition
ServiceConfig *ServiceConfig `json:"serviceConfig"`
}
type GettableService = Service
type UpdatableService struct {
Config *ServiceConfig `json:"config"`
}
type ServiceConfig struct {
AWS *AWSServiceConfig `json:"aws,omitempty"`
}
type AWSServiceConfig struct {
Logs *AWSServiceLogsConfig `json:"logs"`
Metrics *AWSServiceMetricsConfig `json:"metrics"`
}
// AWSServiceLogsConfig is the AWS-specific logs config for a service.
// NOTE: the JSON keys are snake_case for backward compatibility with existing agents.
type AWSServiceLogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
type AWSServiceMetricsConfig struct {
Enabled bool `json:"enabled"`
}
// ServiceDefinitionMetadata represents service definition metadata. It is used to render the services tab in the frontend.
type ServiceDefinitionMetadata struct {
Id string `json:"id"`
Title string `json:"title"`
Icon string `json:"icon"`
}
type ServiceDefinition struct {
ServiceDefinitionMetadata
Overview string `json:"overview"` // markdown
Assets Assets `json:"assets"`
SupportedSignals SupportedSignals `json:"supported_signals"`
DataCollected DataCollected `json:"dataCollected"`
Strategy *CollectionStrategy `json:"telemetryCollectionStrategy"`
}
// CollectionStrategy is cloud-provider-specific configuration for signal collection.
// The agent uses it to work out the specifics of collecting telemetry for the cloud provider.
type CollectionStrategy struct {
AWS *AWSCollectionStrategy `json:"aws,omitempty"`
}
// Assets represents the collection of dashboards.
type Assets struct {
Dashboards []Dashboard `json:"dashboards"`
}
// SupportedSignals for cloud provider's service.
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
// DataCollected is a curated, static list of metrics and logs; it is shown as part of the service overview.
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
// CollectedLogAttribute represents a log attribute that is present in all log entries for a service;
// it is shown as part of the service overview.
type CollectedLogAttribute struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
// CollectedMetric represents a metric that is collected for a service; it is shown as part of the service overview.
type CollectedMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Unit string `json:"unit"`
Description string `json:"description"`
}
// AWSCollectionStrategy represents the signal collection strategy for AWS services.
// NOTE: this structure still uses snake_case JSON keys for backward compatibility
// with existing agents.
type AWSCollectionStrategy struct {
Metrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
Logs *AWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // only used by the S3 Sync service type on AWS
}
// AWSMetricsStrategy represents the metrics collection strategy for AWS services.
// NOTE: this structure still uses snake_case JSON keys for backward compatibility
// with existing agents.
type AWSMetricsStrategy struct {
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
StreamFilters []struct {
// JSON tags here are in the shape expected by the AWS API, as detailed at
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
}
// AWSLogsStrategy represents the logs collection strategy for AWS services.
// NOTE: this structure still uses snake_case JSON keys for backward compatibility
// with existing agents.
type AWSLogsStrategy struct {
Subscriptions []struct {
// Subscribe to all log groups with the specified prefix,
// e.g. `/aws/rds/`.
LogGroupNamePrefix string `json:"log_group_name_prefix"`
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
// "" implies no filtering is required.
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
}
// Dashboard represents a dashboard definition for a cloud integration.
// It is used to show the available pre-made dashboards for a service,
// hence the additional fields like id, title, and description.
type Dashboard struct {
Id string `json:"id"`
Title string `json:"title"`
Description string `json:"description"`
Definition dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
// SupportedServices is the map of supported services for each cloud provider.
var SupportedServices = map[CloudProviderType][]ServiceID{
CloudProviderTypeAWS: {
{valuer.NewString("alb")},
{valuer.NewString("api-gateway")},
{valuer.NewString("dynamodb")},
{valuer.NewString("ec2")},
{valuer.NewString("ecs")},
{valuer.NewString("eks")},
{valuer.NewString("elasticache")},
{valuer.NewString("lambda")},
{valuer.NewString("msk")},
{valuer.NewString("rds")},
{valuer.NewString("s3sync")},
{valuer.NewString("sns")},
{valuer.NewString("sqs")},
},
}
// NewServiceID returns a new ServiceID from a string, validated against the supported services for the given cloud provider.
func NewServiceID(provider CloudProviderType, service string) (ServiceID, error) {
services, ok := SupportedServices[provider]
if !ok {
return ServiceID{}, errors.NewInvalidInputf(ErrCodeInvalidServiceID, "no services defined for cloud provider: %s", provider)
}
for _, s := range services {
if s.StringValue() == service {
return s, nil
}
}
return ServiceID{}, errors.NewInvalidInputf(ErrCodeInvalidServiceID, "invalid service id %q for cloud provider %s", service, provider)
}
// UTILS
// GetCloudIntegrationDashboardID returns the dashboard id for a cloud integration, given the cloud provider, service id, and dashboard id.
// It generates unique dashboard ids for cloud integrations; the same format is parsed to recover the cloud provider and service id when needed.
func GetCloudIntegrationDashboardID(cloudProvider CloudProviderType, svcId, dashboardId string) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
// GetDashboardsFromAssets returns the list of dashboards for the cloud provider service from its definition.
func GetDashboardsFromAssets(
svcId string,
orgID valuer.UUID,
cloudProvider CloudProviderType,
createdAt time.Time,
assets Assets,
) []*dashboardtypes.Dashboard {
dashboards := make([]*dashboardtypes.Dashboard, 0)
for _, d := range assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
dashboards = append(dashboards, &dashboardtypes.Dashboard{
ID: GetCloudIntegrationDashboardID(cloudProvider, svcId, d.Id),
Locked: true,
OrgID: orgID,
Data: d.Definition,
TimeAuditable: types.TimeAuditable{
CreatedAt: createdAt,
UpdatedAt: createdAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: author,
UpdatedBy: author,
},
})
}
return dashboards
}
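
A short in-package usage sketch of the helpers above (the service and dashboard ids are illustrative, and CloudProviderTypeAWS is assumed to stringify as "aws"):

import (
	"fmt"
	"log"
)

func exampleServiceHelpers() {
	// Validate the id against the SupportedServices table; an unknown id
	// fails with the invalid_service_id error code.
	svc, err := NewServiceID(CloudProviderTypeAWS, "rds")
	if err != nil {
		log.Fatal(err)
	}
	// Derive the deterministic id used for the service's pre-made dashboards.
	fmt.Println(GetCloudIntegrationDashboardID(CloudProviderTypeAWS, svc.StringValue(), "overview"))
	// cloud-integration--aws--rds--overview
}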

View File

@@ -1,41 +0,0 @@
package cloudintegrationtypes
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Store interface {
// GetAccountByID returns a cloud integration account by id
GetAccountByID(ctx context.Context, orgID, id valuer.UUID, provider CloudProviderType) (*StorableCloudIntegration, error)
// GetConnectedAccount for a given provider
GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider CloudProviderType, providerAccountID string) (*StorableCloudIntegration, error)
// ListConnectedAccounts returns all the cloud integration accounts for the org and cloud provider
ListConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider CloudProviderType) ([]*StorableCloudIntegration, error)
// CreateAccount creates a new cloud integration account
CreateAccount(ctx context.Context, account *StorableCloudIntegration) (*StorableCloudIntegration, error)
// UpdateAccount updates an existing cloud integration account
UpdateAccount(ctx context.Context, account *StorableCloudIntegration) error
// RemoveAccount marks a cloud integration account as removed by setting the RemovedAt field
RemoveAccount(ctx context.Context, orgID, id valuer.UUID, provider CloudProviderType) error
// cloud_integration_service-related methods
// GetServiceByServiceID returns the cloud integration service for the given cloud integration id and service id
GetServiceByServiceID(ctx context.Context, cloudIntegrationID valuer.UUID, serviceID ServiceID) (*StorableCloudIntegrationService, error)
// ListServices returns all the cloud integration services for the given cloud integration id
ListServices(ctx context.Context, cloudIntegrationID valuer.UUID) ([]*StorableCloudIntegrationService, error)
// CreateService creates a new cloud integration service
CreateService(ctx context.Context, service *StorableCloudIntegrationService) (*StorableCloudIntegrationService, error)
// UpdateService updates an existing cloud integration service
UpdateService(ctx context.Context, service *StorableCloudIntegrationService) error
}
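
Keeping persistence behind this interface makes it straightforward to stub in tests. A minimal consumer sketch (the helper is hypothetical and uses only the methods above):

// removeAccount looks the account up before soft-deleting it, so a missing
// account surfaces as a lookup error rather than a silent no-op.
func removeAccount(ctx context.Context, store Store, orgID, id valuer.UUID) error {
	if _, err := store.GetAccountByID(ctx, orgID, id, CloudProviderTypeAWS); err != nil {
		return err
	}
	// RemoveAccount sets RemovedAt instead of deleting the row.
	return store.RemoveAccount(ctx, orgID, id, CloudProviderTypeAWS)
}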

View File

@@ -1,13 +1,13 @@
package authtypes
package roletypes
import (
"context"
"encoding/json"
"regexp"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"github.com/uptrace/bun"
@@ -51,7 +51,7 @@ var (
)
var (
TypeableResourcesRoles = MustNewTypeableMetaResources(MustNewName("roles"))
TypeableResourcesRoles = authtypes.MustNewTypeableMetaResources(authtypes.MustNewName("roles"))
)
type StorableRole struct {
@@ -194,20 +194,20 @@ func (role *PatchableRole) UnmarshalJSON(data []byte) error {
return nil
}
func GetAdditionTuples(name string, orgID valuer.UUID, relation Relation, additions []*Object) ([]*openfgav1.TupleKey, error) {
func GetAdditionTuples(name string, orgID valuer.UUID, relation authtypes.Relation, additions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, object := range additions {
typeable := MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
typeable := authtypes.MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
transactionTuples, err := typeable.Tuples(
MustNewSubject(
TypeableRole,
authtypes.MustNewSubject(
authtypes.TypeableRole,
name,
orgID,
&RelationAssignee,
&authtypes.RelationAssignee,
),
relation,
[]Selector{object.Selector},
[]authtypes.Selector{object.Selector},
orgID,
)
if err != nil {
@@ -220,20 +220,20 @@ func GetAdditionTuples(name string, orgID valuer.UUID, relation Relation, additi
return tuples, nil
}
func GetDeletionTuples(name string, orgID valuer.UUID, relation Relation, deletions []*Object) ([]*openfgav1.TupleKey, error) {
func GetDeletionTuples(name string, orgID valuer.UUID, relation authtypes.Relation, deletions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, object := range deletions {
typeable := MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
typeable := authtypes.MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
transactionTuples, err := typeable.Tuples(
MustNewSubject(
TypeableRole,
authtypes.MustNewSubject(
authtypes.TypeableRole,
name,
orgID,
&RelationAssignee,
&authtypes.RelationAssignee,
),
relation,
[]Selector{object.Selector},
[]authtypes.Selector{object.Selector},
orgID,
)
if err != nil {
@@ -254,15 +254,3 @@ func MustGetSigNozManagedRoleFromExistingRole(role types.Role) string {
return managedRole
}
type RoleStore interface {
Create(context.Context, *StorableRole) error
Get(context.Context, valuer.UUID, valuer.UUID) (*StorableRole, error)
GetByOrgIDAndName(context.Context, valuer.UUID, string) (*StorableRole, error)
List(context.Context, valuer.UUID) ([]*StorableRole, error)
ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*StorableRole, error)
ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*StorableRole, error)
Update(context.Context, valuer.UUID, *StorableRole) error
Delete(context.Context, valuer.UUID, valuer.UUID) error
RunInTx(context.Context, func(ctx context.Context) error) error
}
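
With the move into roletypes, callers now reach the shared authorization primitives through authtypes. A minimal sketch of computing both sides of a role patch with the helpers above (the wrapper function is hypothetical):

import (
	openfgav1 "github.com/openfga/api/proto/openfga/v1"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// roleObjectPatchTuples computes the tuple writes and deletes for a role patch
// in one place, so both sides can be applied to the OpenFGA store together.
func roleObjectPatchTuples(
	name string,
	orgID valuer.UUID,
	relation authtypes.Relation,
	added, removed []*authtypes.Object,
) (writes, deletes []*openfgav1.TupleKey, err error) {
	if writes, err = roletypes.GetAdditionTuples(name, orgID, relation, added); err != nil {
		return nil, nil, err
	}
	if deletes, err = roletypes.GetDeletionTuples(name, orgID, relation, removed); err != nil {
		return nil, nil, err
	}
	return writes, deletes, nil
}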

View File

@@ -0,0 +1,19 @@
package roletypes
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Store interface {
Create(context.Context, *StorableRole) error
Get(context.Context, valuer.UUID, valuer.UUID) (*StorableRole, error)
GetByOrgIDAndName(context.Context, valuer.UUID, string) (*StorableRole, error)
List(context.Context, valuer.UUID) ([]*StorableRole, error)
ListByOrgIDAndNames(context.Context, valuer.UUID, []string) ([]*StorableRole, error)
ListByOrgIDAndIDs(context.Context, valuer.UUID, []valuer.UUID) ([]*StorableRole, error)
Update(context.Context, valuer.UUID, *StorableRole) error
Delete(context.Context, valuer.UUID, valuer.UUID) error
RunInTx(context.Context, func(ctx context.Context) error) error
}
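
RunInTx lets the CRUD methods compose atomically, assuming the transaction is propagated through ctx as the callback signature suggests. A minimal in-package sketch (the helper is hypothetical):

// createAndGet creates a role and re-reads it inside one transaction,
// rolling back the create if the follow-up read fails.
func createAndGet(ctx context.Context, store Store, orgID valuer.UUID, name string, role *StorableRole) (*StorableRole, error) {
	var out *StorableRole
	err := store.RunInTx(ctx, func(ctx context.Context) error {
		if err := store.Create(ctx, role); err != nil {
			return err
		}
		got, err := store.GetByOrgIDAndName(ctx, orgID, name)
		if err != nil {
			return err
		}
		out = got
		return nil
	})
	return out, err
}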

View File

@@ -9,7 +9,7 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -102,10 +102,10 @@ func NewServiceAccountFromStorables(storableServiceAccount *StorableServiceAccou
}
}
func NewServiceAccountsFromRoles(storableServiceAccounts []*StorableServiceAccount, roles []*authtypes.Role, serviceAccountIDToRoleIDsMap map[string][]valuer.UUID) []*ServiceAccount {
func NewServiceAccountsFromRoles(storableServiceAccounts []*StorableServiceAccount, roles []*roletypes.Role, serviceAccountIDToRoleIDsMap map[string][]valuer.UUID) []*ServiceAccount {
serviceAccounts := make([]*ServiceAccount, 0, len(storableServiceAccounts))
roleIDToRole := make(map[string]*authtypes.Role, len(roles))
roleIDToRole := make(map[string]*roletypes.Role, len(roles))
for _, role := range roles {
roleIDToRole[role.ID.String()] = role
}

View File

@@ -5,7 +5,7 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -19,7 +19,7 @@ type StorableServiceAccountRole struct {
RoleID string `bun:"role_id"`
}
func NewStorableServiceAccountRoles(serviceAccountID valuer.UUID, roles []*authtypes.Role) []*StorableServiceAccountRole {
func NewStorableServiceAccountRoles(serviceAccountID valuer.UUID, roles []*roletypes.Role) []*StorableServiceAccountRole {
storableServiceAccountRoles := make([]*StorableServiceAccountRole, len(roles))
for idx, role := range roles {
storableServiceAccountRoles[idx] = &StorableServiceAccountRole{
@@ -38,7 +38,7 @@ func NewStorableServiceAccountRoles(serviceAccountID valuer.UUID, roles []*autht
return storableServiceAccountRoles
}
func NewRolesFromStorableServiceAccountRoles(storable []*StorableServiceAccountRole, roles []*authtypes.Role) ([]string, error) {
func NewRolesFromStorableServiceAccountRoles(storable []*StorableServiceAccountRole, roles []*roletypes.Role) ([]string, error) {
roleIDToName := make(map[string]string, len(roles))
for _, role := range roles {
roleIDToName[role.ID.String()] = role.Name

View File

@@ -45,8 +45,6 @@ type MetadataStore interface {
// GetFirstSeenFromMetricMetadata gets the first seen timestamp for a metric metadata lookup key.
GetFirstSeenFromMetricMetadata(ctx context.Context, lookupKeys []MetricMetadataLookupKey) (map[MetricMetadataLookupKey]int64, error)
FetchLastSeenInfoMulti(ctx context.Context, metricNames ...string) (map[string]int64, error)
}
type MetricMetadataLookupKey struct {

View File

@@ -342,7 +342,3 @@ func (m *MockMetadataStore) SetFirstSeenFromMetricMetadata(firstSeenMap map[tele
m.LookupKeysMap[key] = value
}
}
func (m *MockMetadataStore) FetchLastSeenInfoMulti(ctx context.Context, metricNames ...string) (map[string]int64, error) {
return make(map[string]int64), nil
}

View File

@@ -1,4 +1,3 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
@@ -8,7 +7,6 @@ from sqlalchemy import sql
from wiremock.resources.mappings import Mapping
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, add_license
from fixtures.metrics import Metrics
from fixtures.types import Operation, SigNoz, TestContainerDocker
@@ -76,37 +74,9 @@ def test_public_dashboard_widget_query_range(
signoz: SigNoz,
create_user_admin: Operation, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Insert metric data so the widget query returns results instead of 404
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
metrics: List[Metrics] = [
Metrics(
metric_name="container.cpu.time",
labels={"service": "test-service"},
timestamp=now - timedelta(minutes=5),
value=100.0,
temporality="Cumulative",
),
Metrics(
metric_name="container.cpu.time",
labels={"service": "test-service"},
timestamp=now - timedelta(minutes=3),
value=200.0,
temporality="Cumulative",
),
Metrics(
metric_name="container.cpu.time",
labels={"service": "test-service"},
timestamp=now - timedelta(minutes=1),
value=300.0,
temporality="Cumulative",
),
]
insert_metrics(metrics)
dashboard_req = {
"title": "Test Widget Query Range Dashboard",
"description": "For testing widget query range",

View File

@@ -10,16 +10,12 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
assert_minutely_bucket_values,
build_builder_query,
find_named_result,
index_series_by_label,
make_query_request,
)
from fixtures.utils import get_testdata_file_path
FILL_GAPS = "fillGaps"
FILL_ZERO = "fillZero"
HISTOGRAM_FILE = get_testdata_file_path("histogram_data_1h.jsonl")
def _build_format_options(fill_mode: str) -> Dict[str, Any]:
@@ -584,39 +580,3 @@ def test_metrics_fill_formula_with_group_by(
expected_by_ts=expectations[group],
context=f"metrics/{fill_mode}/F1/{group}",
)
def test_histogram_p90_returns_404_outside_data_window(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
metric_name = "test_p90_last_seen_bucket"
metrics = Metrics.load_from_file(
HISTOGRAM_FILE,
base_time=now - timedelta(minutes=90),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p90",
)
end_ms = int(now.timestamp() * 1000)
start_2h = int((now - timedelta(hours=2)).timestamp() * 1000)
response = make_query_request(signoz, token, start_2h, end_ms, [query])
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
start_15m = int((now - timedelta(minutes=15)).timestamp() * 1000)
response = make_query_request(signoz, token, start_15m, end_ms, [query])
assert response.status_code == HTTPStatus.NOT_FOUND