Compare commits

21 Commits

Author SHA1 Message Date
swapnil-signoz 9e468513c5 feat: adding module skeleton 2026-03-16 10:37:33 +05:30
swapnil-signoz 05062d49e9 feat: adding module skeleton 2026-03-14 19:19:14 +05:30
swapnil-signoz 11ed15f4c5 feat: implement cloud integration store 2026-03-14 17:05:02 +05:30
swapnil-signoz f47877cca9 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-14 17:01:51 +05:30
swapnil-signoz bb2b9215ba fix: correct GetService signature and remove shadowed Data field 2026-03-14 16:59:07 +05:30
swapnil-signoz 3111904223 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-14 16:36:35 +05:30
swapnil-signoz 003e2c30d8 Merge branch 'main' into refactor/cloud-integration-types 2026-03-14 16:25:35 +05:30
swapnil-signoz 00fe516d10 refactor: update cloud integration types and module interface 2026-03-14 16:25:16 +05:30
swapnil-signoz 0305f4f7db refactor: using struct for map 2026-03-13 16:09:26 +05:30
swapnil-signoz c60019a6dc Merge branch 'main' into refactor/cloud-integration-types 2026-03-12 23:41:22 +05:30
swapnil-signoz acde2a37fa feat: adding updated types for cloud integration 2026-03-12 23:40:44 +05:30
swapnil-signoz 945241a52a Merge branch 'main' into refactor/cloud-integration-types 2026-03-12 19:45:50 +05:30
swapnil-signoz e967f80c86 Merge branch 'main' into refactor/cloud-integration-types 2026-03-02 16:39:42 +05:30
swapnil-signoz a09dc325de Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-02 16:39:20 +05:30
swapnil-signoz 379b4f7fc4 refactor: removing interface check 2026-03-02 14:50:37 +05:30
swapnil-signoz 5e536ae077 Merge branch 'refactor/cloud-integration-types' into refactor/cloud-integration-impl-store 2026-03-02 14:49:35 +05:30
swapnil-signoz 234585e642 Merge branch 'main' into refactor/cloud-integration-types 2026-03-02 14:49:19 +05:30
swapnil-signoz 2cc14f1ad4 Merge branch 'main' into refactor/cloud-integration-impl-store 2026-03-02 14:49:00 +05:30
swapnil-signoz dc4ed4d239 feat: adding sql store implementation 2026-03-02 14:44:56 +05:30
swapnil-signoz 7281c36873 refactor: store interfaces to use local types and error 2026-03-02 13:27:46 +05:30
swapnil-signoz 40288776e8 feat: adding cloud integration type for refactor 2026-02-28 16:59:14 +05:30
63 changed files with 2441 additions and 5948 deletions

View File

@@ -1,59 +0,0 @@
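# Posts a Slack alert and a PR comment whenever a pull request is dequeued from the merge queue.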
name: mergequeueci

on:
  pull_request:
    types:
      - dequeued

jobs:
  notify:
    runs-on: ubuntu-latest
    steps:
      - name: alert
        uses: slackapi/slack-github-action@v2.1.1
        with:
          webhook: ${{ secrets.SLACK_MERGE_QUEUE_WEBHOOK }}
          webhook-type: incoming-webhook
          payload: |
            {
              "text": ":x: PR removed from merge queue",
              "blocks": [
                {
                  "type": "header",
                  "text": {
                    "type": "plain_text",
                    "text": ":x: PR Removed from Merge Queue"
                  }
                },
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": "*<${{ github.event.pull_request.html_url }}|PR #${{ github.event.pull_request.number }}: ${{ github.event.pull_request.title }}>*"
                  }
                },
                {
                  "type": "divider"
                },
                {
                  "type": "section",
                  "fields": [
                    {
                      "type": "mrkdwn",
                      "text": "*Author*\n@${{ github.event.pull_request.user.login }}"
                    }
                  ]
                }
              ]
            }
      - name: comment
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          PR_AUTHOR: ${{ github.event.pull_request.user.login }}
          PR_URL: ${{ github.event.pull_request.html_url }}
        run: |
          gh api repos/${{ github.repository }}/issues/$PR_NUMBER/comments \
            -f body="> :x: **PR removed from merge queue**
          >
          > @$PR_AUTHOR your PR was removed from the merge queue. Fix the issue and re-queue when ready."

View File

@@ -17,7 +17,5 @@
},
"[html]": {
"editor.defaultFormatter": "vscode.html-language-features"
},
"python-envs.defaultEnvManager": "ms-python.python:system",
"python-envs.pythonProjects": []
}
}

View File

@@ -13,8 +13,11 @@ import (
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
@@ -88,6 +91,9 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
func(ps factory.ProviderSettings, q querier.Querier, a analytics.Analytics) querier.Handler {
return querier.NewHandler(ps, q, a)
},
func(_ sqlstore.SQLStore, _ licensing.Licensing, _ zeus.Zeus, _ gateway.Gateway, _ global.Config) cloudintegration.Module {
return implcloudintegration.NewModule()
},
)
if err != nil {
logger.ErrorContext(ctx, "failed to create signoz", "error", err)

View File

@@ -9,12 +9,13 @@ import (
"github.com/SigNoz/signoz/ee/authn/callbackauthn/oidccallbackauthn"
"github.com/SigNoz/signoz/ee/authn/callbackauthn/samlcallbackauthn"
"github.com/SigNoz/signoz/ee/authz/openfgaauthz"
eequerier "github.com/SigNoz/signoz/ee/querier"
"github.com/SigNoz/signoz/ee/authz/openfgaschema"
"github.com/SigNoz/signoz/ee/gateway/httpgateway"
enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
"github.com/SigNoz/signoz/ee/licensing/httplicensing"
"github.com/SigNoz/signoz/ee/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
eequerier "github.com/SigNoz/signoz/ee/querier"
enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
@@ -25,7 +26,10 @@ import (
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
pkgimplcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
@@ -129,6 +133,9 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
communityHandler := querier.NewHandler(ps, q, a)
return eequerier.NewHandler(ps, q, communityHandler)
},
func(store sqlstore.SQLStore, lic licensing.Licensing, z zeus.Zeus, gw gateway.Gateway, gc global.Config) cloudintegration.Module {
return implcloudintegration.NewModule(pkgimplcloudintegration.NewStore(store), store, lic, z, gw, gc)
},
)
if err != nil {

View File

@@ -3001,210 +3001,6 @@ paths:
summary: Update auth domain
tags:
- authdomains
/api/v1/export_raw_data:
get:
deprecated: false
description: This endpoint allows a simple query for exporting raw data for traces
and logs
operationId: HandleExportRawDataGET
parameters:
- description: The output format for the export.
in: query
name: format
schema:
default: csv
description: The output format for the export.
enum:
- csv
- jsonl
type: string
- description: The type of data to export.
in: query
name: signal
required: true
schema:
$ref: '#/components/schemas/TelemetrytypesSignal'
- deprecated: true
description: 'Deprecated: use signal instead.'
in: query
name: source
schema:
deprecated: true
description: 'Deprecated: use signal instead.'
type: string
- description: The start time for the query in unix timestamp nanoseconds.
in: query
name: start
schema:
description: The start time for the query in unix timestamp nanoseconds.
minimum: 0
type: integer
- description: The end time for the query in unix timestamp nanoseconds.
in: query
name: end
schema:
description: The end time for the query in unix timestamp nanoseconds.
minimum: 0
type: integer
- description: The maximum number of rows to export.
in: query
name: limit
schema:
default: 10000
description: The maximum number of rows to export.
maximum: 50000
minimum: 1
type: integer
- deprecated: true
description: 'Deprecated: use filterExpression instead.'
in: query
name: filter
schema:
deprecated: true
description: 'Deprecated: use filterExpression instead.'
type: string
- content:
application/json:
schema:
$ref: '#/components/schemas/Querybuildertypesv5Filter'
description: The filter expression to apply to the query.
in: query
name: filterExpression
- deprecated: true
description: 'Deprecated: use selectFields instead.'
in: query
name: columns
schema:
deprecated: true
description: 'Deprecated: use selectFields instead.'
items:
type: string
type: array
- description: The columns to include in the export.
in: query
name: selectFields
schema:
description: The columns to include in the export.
items:
$ref: '#/components/schemas/TelemetrytypesTelemetryFieldKey'
type: array
- deprecated: true
description: 'Deprecated: use order instead.'
in: query
name: order_by
schema:
deprecated: true
description: 'Deprecated: use order instead.'
type: string
- description: The sorting order with keys and directions.
in: query
name: order
schema:
description: The sorting order with keys and directions.
items:
$ref: '#/components/schemas/Querybuildertypesv5OrderBy'
type: array
responses:
"200":
content:
application/json:
schema:
type: string
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: Export raw data
tags:
- logs
- traces
post:
deprecated: false
description: This endpoint allows a complex query for exporting raw data for traces
and logs
operationId: HandleExportRawDataPOST
parameters:
- description: The output format for the export.
in: query
name: format
schema:
default: csv
description: The output format for the export.
enum:
- csv
- jsonl
type: string
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/Querybuildertypesv5QueryRangeRequest'
responses:
"200":
content:
application/json:
schema:
type: string
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: Export raw data
tags:
- logs
- traces
/api/v1/fields/keys:
get:
deprecated: false

View File

@@ -123,7 +123,6 @@ if err := router.Handle("/api/v1/things", handler.New(
Description: "This endpoint creates a thing",
Request: new(types.PostableThing),
RequestContentType: "application/json",
RequestQuery: new(types.QueryableThing),
Response: new(types.GettableThing),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusCreated,
@@ -156,8 +155,6 @@ The `handler.New` function ties the HTTP handler to OpenAPI metadata via `OpenAP
- **Request / RequestContentType**:
- `Request` is a Go type that describes the request body or form.
- `RequestContentType` is usually `"application/json"` or `"application/x-www-form-urlencoded"` (for callbacks like SAML).
- **RequestQuery**:
- `RequestQuery` is a Go type that describes query URL params.
- **RequestExamples**: An array of `handler.OpenAPIExample` that provide concrete request payloads in the generated spec. See [Adding request examples](#adding-request-examples) below.
- **Response / ResponseContentType**:
- `Response` is the Go type for the successful response payload.
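
For reference, a complete registration using the fields described above might look like the following sketch (the route, `thingHandler.Create`, and the `types.PostableThing`/`types.GettableThing` payload types are the guide's illustrative placeholders, not real APIs):

if err := router.Handle("/api/v1/things", handler.New(
	thingHandler.Create,
	handler.OpenAPIDef{
		ID:                  "CreateThing",
		Summary:             "Create a thing",
		Description:         "This endpoint creates a thing",
		Request:             new(types.PostableThing),
		RequestContentType:  "application/json",
		Response:            new(types.GettableThing),
		ResponseContentType: "application/json",
		SuccessStatusCode:   http.StatusCreated,
	},
)).Methods(http.MethodPost).GetError(); err != nil {
	return err
}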

View File

@@ -0,0 +1,22 @@
package implcloudintegration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type awsProvider struct{}

func (p *awsProvider) CreateArtifact(
	_ context.Context,
	_ valuer.UUID,
	_ *cloudintegrationtypes.ConnectionArtifactRequest,
	_ cloudintegration.Credentials,
	_ valuer.UUID,
) (*cloudintegrationtypes.ConnectionArtifact, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

View File

@@ -0,0 +1,23 @@
package implcloudintegration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// azureProvider is kept as an example of how additional cloud providers will be added.
type azureProvider struct{}

func (p *azureProvider) CreateArtifact(
	_ context.Context,
	_ valuer.UUID,
	_ *cloudintegrationtypes.ConnectionArtifactRequest,
	_ cloudintegration.Credentials,
	_ valuer.UUID,
) (*cloudintegrationtypes.ConnectionArtifact, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

View File

@@ -0,0 +1,267 @@
package implcloudintegration

import (
	"context"
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/gateway"
	"github.com/SigNoz/signoz/pkg/global"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/modules/user/impluser"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
	"github.com/SigNoz/signoz/pkg/types/zeustypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/SigNoz/signoz/pkg/zeus"
)

type module struct {
	store        cloudintegrationtypes.Store
	userStore    types.UserStore
	licensing    licensing.Licensing
	zeus         zeus.Zeus
	gateway      gateway.Gateway
	globalConfig global.Config
	providers    map[cloudintegrationtypes.CloudProviderType]cloudintegration.CloudProvider
}

func NewModule(
	store cloudintegrationtypes.Store,
	sqlStore sqlstore.SQLStore,
	lic licensing.Licensing,
	z zeus.Zeus,
	gw gateway.Gateway,
	gc global.Config,
) cloudintegration.Module {
	return &module{
		store:        store,
		userStore:    impluser.NewStore(sqlStore, factory.ProviderSettings{}),
		licensing:    lic,
		zeus:         z,
		gateway:      gw,
		globalConfig: gc,
		providers: map[cloudintegrationtypes.CloudProviderType]cloudintegration.CloudProvider{
			cloudintegrationtypes.CloudProviderTypeAWS:   &awsProvider{},
			cloudintegrationtypes.CloudProviderTypeAzure: &azureProvider{},
		},
	}
}

func (m *module) CreateConnectionArtifact(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, request *cloudintegrationtypes.ConnectionArtifactRequest) (*cloudintegrationtypes.ConnectionArtifact, error) {
	p, ok := m.providers[provider]
	if !ok {
		return nil, errors.NewInvalidInputf(cloudintegrationtypes.ErrCodeCloudProviderInvalidInput, "unsupported cloud provider: %s", provider.StringValue())
	}

	creds, err := m.resolveCredentials(ctx, orgID, provider)
	if err != nil {
		return nil, err
	}

	newAccountID := valuer.GenerateUUID()
	artifact, err := p.CreateArtifact(ctx, orgID, request, creds, newAccountID)
	if err != nil {
		return nil, err
	}

	account := &cloudintegrationtypes.StorableCloudIntegration{
		Identifiable: types.Identifiable{ID: newAccountID},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		Provider: provider,
		OrgID:    orgID,
	}
	if err := m.store.UpsertAccount(ctx, account); err != nil {
		return nil, err
	}

	return artifact, nil
}
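
// resolveCredentials assembles everything the provider-side template needs to talk back to
// SigNoz: a viewer-scoped PAT, plus, when licensing and the gateway are configured, the
// deployment API URL and an ingestion URL/key.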
func (m *module) resolveCredentials(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) (cloudintegration.Credentials, error) {
	creds := cloudintegration.Credentials{}

	pat, err := m.getOrCreateIntegrationPAT(ctx, orgID, provider)
	if err != nil {
		return creds, err
	}
	creds.SigNozAPIKey = pat

	if m.licensing == nil {
		return creds, nil
	}
	license, err := m.licensing.GetActive(ctx, orgID)
	if err != nil {
		return creds, err
	}
	if license == nil {
		return creds, nil
	}

	respBytes, err := m.zeus.GetDeployment(ctx, license.Key)
	if err != nil {
		return creds, errors.NewInternalf(errors.CodeInternal, "couldn't query deployment info: %v", err)
	}
	deployment, err := zeustypes.NewGettableDeployment(respBytes)
	if err != nil {
		return creds, err
	}
	creds.SigNozAPIUrl = deployment.SignozAPIUrl

	if m.globalConfig.IngestionURL != nil {
		creds.IngestionUrl = m.globalConfig.IngestionURL.String()
	}

	if m.gateway != nil {
		ingestionKeyName := fmt.Sprintf("%s-integration", provider.StringValue())
		ingestionKey, err := m.getOrCreateIngestionKey(ctx, orgID, ingestionKeyName)
		if err != nil {
			return creds, err
		}
		creds.IngestionKey = ingestionKey
	}

	return creds, nil
}
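
// getOrCreateIngestionKey returns the existing ingestion key with the given name,
// creating a non-expiring one through the gateway if none matches.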
func (m *module) getOrCreateIngestionKey(ctx context.Context, orgID valuer.UUID, keyName string) (string, error) {
	result, err := m.gateway.SearchIngestionKeysByName(ctx, orgID, keyName, 1, 10)
	if err != nil {
		return "", errors.NewInternalf(errors.CodeInternal, "couldn't search ingestion keys: %v", err)
	}
	for _, k := range result.Keys {
		if k.Name == keyName {
			return k.Value, nil
		}
	}

	created, err := m.gateway.CreateIngestionKey(ctx, orgID, keyName, []string{"integration"}, time.Time{})
	if err != nil {
		return "", errors.NewInternalf(errors.CodeInternal, "couldn't create ingestion key: %v", err)
	}
	return created.Value, nil
}
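
// getOrCreateIntegrationPAT returns the PAT named "<provider> integration" belonging to
// the integration user, minting a new viewer-role key when none exists yet.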
func (m *module) getOrCreateIntegrationPAT(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) (string, error) {
	integrationPATName := fmt.Sprintf("%s integration", provider.StringValue())

	integrationUser, err := m.getOrCreateIntegrationUser(ctx, orgID, provider)
	if err != nil {
		return "", err
	}

	allPATs, err := m.userStore.ListAPIKeys(ctx, orgID)
	if err != nil {
		return "", errors.NewInternalf(errors.CodeInternal, "couldn't list PATs: %v", err)
	}
	for _, p := range allPATs {
		if p.UserID == integrationUser.ID && p.Name == integrationPATName {
			return p.Token, nil
		}
	}

	newPAT, err := types.NewStorableAPIKey(
		integrationPATName,
		integrationUser.ID,
		types.RoleViewer,
		0,
	)
	if err != nil {
		return "", errors.NewInternalf(errors.CodeInternal, "couldn't create cloud integration PAT: %v", err)
	}
	if err := m.userStore.CreateAPIKey(ctx, newPAT); err != nil {
		return "", errors.NewInternalf(errors.CodeInternal, "couldn't persist cloud integration PAT: %v", err)
	}
	return newPAT.Token, nil
}
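
// getOrCreateIntegrationUser finds the provider's integration user (e.g. aws-integration@signoz.io),
// creating it with a generated factor password on first use. Soft-deleted users are ignored.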
func (m *module) getOrCreateIntegrationUser(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) (*types.User, error) {
	cloudIntegrationUserName := fmt.Sprintf("%s-integration", provider.StringValue())
	email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))

	existingUsers, err := m.userStore.GetUsersByEmailAndOrgID(ctx, email, orgID)
	if err != nil {
		return nil, errors.NewInternalf(errors.CodeInternal, "couldn't look up integration user: %v", err)
	}
	for _, u := range existingUsers {
		if u.Status != types.UserStatusDeleted {
			return u, nil
		}
	}

	cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, orgID, types.UserStatusActive)
	if err != nil {
		return nil, errors.NewInternalf(errors.CodeInternal, "couldn't construct integration user: %v", err)
	}
	password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
	if err := m.userStore.CreateUser(ctx, cloudIntegrationUser); err != nil {
		return nil, errors.NewInternalf(errors.CodeInternal, "couldn't create integration user: %v", err)
	}
	if err := m.userStore.CreatePassword(ctx, password); err != nil {
		return nil, errors.NewInternalf(errors.CodeInternal, "couldn't create integration user password: %v", err)
	}
	return cloudIntegrationUser, nil
}
func (m *module) GetAccountStatus(_ context.Context, _, _ valuer.UUID) (*cloudintegrationtypes.AccountStatus, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) ListConnectedAccounts(_ context.Context, _ valuer.UUID) (*cloudintegrationtypes.ConnectedAccounts, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) DisconnectAccount(_ context.Context, _, _ valuer.UUID) error {
	return errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) UpdateAccountConfig(_ context.Context, _, _ valuer.UUID, _ *cloudintegrationtypes.UpdateAccountConfigRequest) (*cloudintegrationtypes.Account, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) ListServicesSummary(_ context.Context, _ valuer.UUID, _ *valuer.UUID) (*cloudintegrationtypes.ServicesSummary, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) GetService(_ context.Context, _ valuer.UUID, _ string, _ *valuer.UUID) (*cloudintegrationtypes.Service, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) UpdateServiceConfig(_ context.Context, _ string, _ valuer.UUID, _ *cloudintegrationtypes.UpdateServiceConfigRequest) (*cloudintegrationtypes.ServiceSummary, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) AgentCheckIn(_ context.Context, _ valuer.UUID, _ *cloudintegrationtypes.AgentCheckInRequest) (cloudintegrationtypes.AgentCheckInResponse, error) {
	return cloudintegrationtypes.AgentCheckInResponse{}, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) GetDashboardByID(_ context.Context, _ string, _ valuer.UUID) (*dashboardtypes.Dashboard, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}

func (m *module) GetAllDashboards(_ context.Context, _ valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
	return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "not implemented")
}
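
As a rough usage sketch (mod, ctx, and orgID are assumed to come from a wired server, as in the entrypoint diffs above):

// Generate AWS connection info for an org; the returned artifact is handed to the
// web client, which decides how to render the connection steps.
artifact, err := mod.CreateConnectionArtifact(ctx, orgID,
	cloudintegrationtypes.CloudProviderTypeAWS,
	&cloudintegrationtypes.ConnectionArtifactRequest{},
)
if err != nil {
	// note: the PAT/user/ingestion-key lookups are get-or-create, so they may have
	// persisted records even when artifact creation itself fails
	return err
}
_ = artifact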

File diff suppressed because it is too large.

(binary image file changed; previous size 214 KiB)

View File

@@ -20,214 +20,11 @@ import { useMutation, useQuery } from 'react-query';
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
HandleExportRawDataGETParams,
HandleExportRawDataPOSTParams,
ListPromotedAndIndexedPaths200,
PromotetypesPromotePathDTO,
Querybuildertypesv5QueryRangeRequestDTO,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
/**
* This endpoint allows a simple query for exporting raw data for traces and logs
* @summary Export raw data
*/
export const handleExportRawDataGET = (
params: HandleExportRawDataGETParams,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<string>({
url: `/api/v1/export_raw_data`,
method: 'GET',
params,
signal,
});
};
export const getHandleExportRawDataGETQueryKey = (
params?: HandleExportRawDataGETParams,
) => {
return [`/api/v1/export_raw_data`, ...(params ? [params] : [])] as const;
};
export const getHandleExportRawDataGETQueryOptions = <
TData = Awaited<ReturnType<typeof handleExportRawDataGET>>,
TError = ErrorType<RenderErrorResponseDTO>
>(
params: HandleExportRawDataGETParams,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof handleExportRawDataGET>>,
TError,
TData
>;
},
) => {
const { query: queryOptions } = options ?? {};
const queryKey =
queryOptions?.queryKey ?? getHandleExportRawDataGETQueryKey(params);
const queryFn: QueryFunction<
Awaited<ReturnType<typeof handleExportRawDataGET>>
> = ({ signal }) => handleExportRawDataGET(params, signal);
return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
Awaited<ReturnType<typeof handleExportRawDataGET>>,
TError,
TData
> & { queryKey: QueryKey };
};
export type HandleExportRawDataGETQueryResult = NonNullable<
Awaited<ReturnType<typeof handleExportRawDataGET>>
>;
export type HandleExportRawDataGETQueryError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Export raw data
*/
export function useHandleExportRawDataGET<
TData = Awaited<ReturnType<typeof handleExportRawDataGET>>,
TError = ErrorType<RenderErrorResponseDTO>
>(
params: HandleExportRawDataGETParams,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof handleExportRawDataGET>>,
TError,
TData
>;
},
): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
const queryOptions = getHandleExportRawDataGETQueryOptions(params, options);
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
queryKey: QueryKey;
};
query.queryKey = queryOptions.queryKey;
return query;
}
/**
* @summary Export raw data
*/
export const invalidateHandleExportRawDataGET = async (
queryClient: QueryClient,
params: HandleExportRawDataGETParams,
options?: InvalidateOptions,
): Promise<QueryClient> => {
await queryClient.invalidateQueries(
{ queryKey: getHandleExportRawDataGETQueryKey(params) },
options,
);
return queryClient;
};
/**
* This endpoint allows a complex query for exporting raw data for traces and logs
* @summary Export raw data
*/
export const handleExportRawDataPOST = (
querybuildertypesv5QueryRangeRequestDTO: BodyType<Querybuildertypesv5QueryRangeRequestDTO>,
params?: HandleExportRawDataPOSTParams,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<string>({
url: `/api/v1/export_raw_data`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: querybuildertypesv5QueryRangeRequestDTO,
params,
signal,
});
};
export const getHandleExportRawDataPOSTMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof handleExportRawDataPOST>>,
TError,
{
data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
params?: HandleExportRawDataPOSTParams;
},
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof handleExportRawDataPOST>>,
TError,
{
data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
params?: HandleExportRawDataPOSTParams;
},
TContext
> => {
const mutationKey = ['handleExportRawDataPOST'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof handleExportRawDataPOST>>,
{
data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
params?: HandleExportRawDataPOSTParams;
}
> = (props) => {
const { data, params } = props ?? {};
return handleExportRawDataPOST(data, params);
};
return { mutationFn, ...mutationOptions };
};
export type HandleExportRawDataPOSTMutationResult = NonNullable<
Awaited<ReturnType<typeof handleExportRawDataPOST>>
>;
export type HandleExportRawDataPOSTMutationBody = BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
export type HandleExportRawDataPOSTMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Export raw data
*/
export const useHandleExportRawDataPOST = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof handleExportRawDataPOST>>,
TError,
{
data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
params?: HandleExportRawDataPOSTParams;
},
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof handleExportRawDataPOST>>,
TError,
{
data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
params?: HandleExportRawDataPOSTParams;
},
TContext
> => {
const mutationOptions = getHandleExportRawDataPOSTMutationOptions(options);
return useMutation(mutationOptions);
};
/**
* This endpoint promotes and indexes paths
* @summary Promote and index paths

View File

@@ -2896,89 +2896,6 @@ export type DeleteAuthDomainPathParameters = {
export type UpdateAuthDomainPathParameters = {
id: string;
};
export type HandleExportRawDataGETParams = {
/**
* @enum csv,jsonl
* @type string
* @description The output format for the export.
*/
format?: HandleExportRawDataGETFormat;
/**
* @description The type of data to export.
*/
signal: TelemetrytypesSignalDTO;
/**
* @type string
* @description Deprecated: use signal instead.
*/
source?: string;
/**
* @type integer
* @minimum 0
* @description The start time for the query in unix timestamp nanoseconds.
*/
start?: number;
/**
* @type integer
* @minimum 0
* @description The end time for the query in unix timestamp nanoseconds.
*/
end?: number;
/**
* @type integer
* @maximum 50000
* @minimum 1
* @description The maximum number of rows to export.
*/
limit?: number;
/**
* @type string
* @description Deprecated: use filterExpression instead.
*/
filter?: string;
/**
* @description The filter expression to apply to the query.
*/
filterExpression?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @description Deprecated: use selectFields instead.
*/
columns?: string[];
/**
* @type array
* @description The columns to include in the export.
*/
selectFields?: TelemetrytypesTelemetryFieldKeyDTO[];
/**
* @type string
* @description Deprecated: use order instead.
*/
order_by?: string;
/**
* @type array
* @description The sorting order with keys and directions.
*/
order?: Querybuildertypesv5OrderByDTO[];
};
export enum HandleExportRawDataGETFormat {
csv = 'csv',
jsonl = 'jsonl',
}
export type HandleExportRawDataPOSTParams = {
/**
* @enum csv,jsonl
* @type string
* @description The output format for the export.
*/
format?: HandleExportRawDataPOSTFormat;
};
export enum HandleExportRawDataPOSTFormat {
csv = 'csv',
jsonl = 'jsonl',
}
export type GetFieldsKeysParams = {
/**
* @description undefined

View File

@@ -1,6 +1,5 @@
// ** Helpers
import { MetrictypesTypeDTO } from 'api/generated/services/sigNoz.schemas';
import { defaultTraceSelectedColumns } from 'container/OptionsMenu/constants';
import { createIdFromObjectFields } from 'lib/createIdFromObjectFields';
import { createNewBuilderItemName } from 'lib/newQueryBuilder/createNewBuilderItemName';
import { IAttributeValuesResponse } from 'types/api/queryBuilder/getAttributesValues';
@@ -549,49 +548,3 @@ export const DATA_TYPE_VS_ATTRIBUTE_VALUES_KEY: Record<
[DataTypes.ArrayBool]: 'boolAttributeValues',
[DataTypes.EMPTY]: 'stringAttributeValues',
};
export const listViewInitialLogQuery: Query = {
...initialQueriesMap.logs,
builder: {
...initialQueriesMap.logs.builder,
queryData: [
{
...initialQueriesMap.logs.builder.queryData[0],
aggregateOperator: LogsAggregatorOperator.NOOP,
orderBy: [{ columnName: 'timestamp', order: 'desc' }],
offset: 0,
pageSize: 100,
},
],
},
};
export const PANEL_TYPES_INITIAL_QUERY: Record<PANEL_TYPES, Query> = {
[PANEL_TYPES.TIME_SERIES]: initialQueriesMap.metrics,
[PANEL_TYPES.VALUE]: initialQueriesMap.metrics,
[PANEL_TYPES.TABLE]: initialQueriesMap.metrics,
[PANEL_TYPES.LIST]: listViewInitialLogQuery,
[PANEL_TYPES.TRACE]: initialQueriesMap.traces,
[PANEL_TYPES.BAR]: initialQueriesMap.metrics,
[PANEL_TYPES.PIE]: initialQueriesMap.metrics,
[PANEL_TYPES.HISTOGRAM]: initialQueriesMap.metrics,
[PANEL_TYPES.EMPTY_WIDGET]: initialQueriesMap.metrics,
};
export const listViewInitialTraceQuery: Query = {
// it should be the above commented query
...initialQueriesMap.traces,
builder: {
...initialQueriesMap.traces.builder,
queryData: [
{
...initialQueriesMap.traces.builder.queryData[0],
aggregateOperator: LogsAggregatorOperator.NOOP,
orderBy: [{ columnName: 'timestamp', order: 'desc' }],
offset: 0,
pageSize: 10,
selectColumns: defaultTraceSelectedColumns,
},
],
},
};

View File

@@ -1,4 +1,4 @@
.panel-type-selection-modal {
.graph-selection {
.ant-modal-content {
width: 515px;
max-height: 646px;
@@ -76,11 +76,6 @@
content: none;
}
}
.panel-type-text {
text-align: center;
margin-top: 1rem;
}
}
}
@@ -119,7 +114,7 @@
}
.lightMode {
.panel-type-selection-modal {
.graph-selection {
.ant-modal-content {
border: 1px solid var(--bg-vanilla-300);
background: var(--bg-vanilla-100);

View File

@@ -0,0 +1,50 @@
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { defaultTraceSelectedColumns } from 'container/OptionsMenu/constants';
import { Query } from 'types/api/queryBuilder/queryBuilderData';
import { LogsAggregatorOperator } from 'types/common/queryBuilder';
export const PANEL_TYPES_INITIAL_QUERY = {
[PANEL_TYPES.TIME_SERIES]: initialQueriesMap.metrics,
[PANEL_TYPES.VALUE]: initialQueriesMap.metrics,
[PANEL_TYPES.TABLE]: initialQueriesMap.metrics,
[PANEL_TYPES.LIST]: initialQueriesMap.logs,
[PANEL_TYPES.TRACE]: initialQueriesMap.traces,
[PANEL_TYPES.BAR]: initialQueriesMap.metrics,
[PANEL_TYPES.PIE]: initialQueriesMap.metrics,
[PANEL_TYPES.HISTOGRAM]: initialQueriesMap.metrics,
[PANEL_TYPES.EMPTY_WIDGET]: initialQueriesMap.metrics,
};
export const listViewInitialLogQuery: Query = {
...initialQueriesMap.logs,
builder: {
...initialQueriesMap.logs.builder,
queryData: [
{
...initialQueriesMap.logs.builder.queryData[0],
aggregateOperator: LogsAggregatorOperator.NOOP,
orderBy: [{ columnName: 'timestamp', order: 'desc' }],
offset: 0,
pageSize: 100,
},
],
},
};
export const listViewInitialTraceQuery: Query = {
// it should be the above commented query
...initialQueriesMap.traces,
builder: {
...initialQueriesMap.traces.builder,
queryData: [
{
...initialQueriesMap.traces.builder.queryData[0],
aggregateOperator: LogsAggregatorOperator.NOOP,
orderBy: [{ columnName: 'timestamp', order: 'desc' }],
offset: 0,
pageSize: 10,
selectColumns: defaultTraceSelectedColumns,
},
],
},
};

View File

@@ -0,0 +1,94 @@
import { Card, Modal } from 'antd';
import logEvent from 'api/common/logEvent';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
import createQueryParams from 'lib/createQueryParams';
import history from 'lib/history';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { LogsAggregatorOperator } from 'types/common/queryBuilder';
import { v4 as uuid } from 'uuid';
import { PANEL_TYPES_INITIAL_QUERY } from './constants';
import menuItems from './menuItems';
import { Text } from './styles';
import './ComponentSlider.styles.scss';
function DashboardGraphSlider(): JSX.Element {
const { handleToggleDashboardSlider, isDashboardSliderOpen } = useDashboard();
const onClickHandler = (name: PANEL_TYPES) => (): void => {
const id = uuid();
handleToggleDashboardSlider(false);
logEvent('Dashboard Detail: New panel type selected', {
// dashboardId: '',
// dashboardName: '',
// numberOfPanels: 0, // todo - at this point we don't know these attributes
panelType: name,
widgetId: id,
});
const queryParamsLog = {
graphType: name,
widgetId: id,
[QueryParams.compositeQuery]: JSON.stringify({
...PANEL_TYPES_INITIAL_QUERY[name],
builder: {
...PANEL_TYPES_INITIAL_QUERY[name].builder,
queryData: [
{
...PANEL_TYPES_INITIAL_QUERY[name].builder.queryData[0],
aggregateOperator: LogsAggregatorOperator.NOOP,
orderBy: [{ columnName: 'timestamp', order: 'desc' }],
offset: 0,
pageSize: 100,
},
],
},
}),
};
const queryParams = {
graphType: name,
widgetId: id,
[QueryParams.compositeQuery]: JSON.stringify(
PANEL_TYPES_INITIAL_QUERY[name],
),
};
if (name === PANEL_TYPES.LIST) {
history.push(
`${history.location.pathname}/new?${createQueryParams(queryParamsLog)}`,
);
} else {
history.push(
`${history.location.pathname}/new?${createQueryParams(queryParams)}`,
);
}
};
const handleCardClick = (panelType: PANEL_TYPES): void => {
onClickHandler(panelType)();
};
return (
<Modal
open={isDashboardSliderOpen}
onCancel={(): void => {
handleToggleDashboardSlider(false);
}}
rootClassName="graph-selection"
footer={null}
title="New Panel"
>
<div className="panel-selection">
{menuItems.map(({ name, icon, display }) => (
<Card onClick={(): void => handleCardClick(name)} id={name} key={name}>
{icon}
<Text>{display}</Text>
</Card>
))}
</div>
</Modal>
);
}
export default DashboardGraphSlider;

View File

@@ -9,7 +9,7 @@ import {
Table,
} from 'lucide-react';
export const PanelTypesWithData: ItemsProps[] = [
const Items: ItemsProps[] = [
{
name: PANEL_TYPES.TIME_SERIES,
icon: <LineChart size={16} color={Color.BG_ROBIN_400} />,
@@ -52,3 +52,5 @@ export interface ItemsProps {
icon: JSX.Element;
display: string;
}
export default Items;

View File

@@ -0,0 +1,41 @@
import { Card as CardComponent, Typography } from 'antd';
import styled from 'styled-components';
export const Container = styled.div`
display: flex;
justify-content: right;
gap: 8px;
margin-bottom: 12px;
`;
export const Card = styled(CardComponent)`
min-height: 80px;
min-width: 120px;
overflow-y: auto;
cursor: pointer;
transition: transform 0.2s;
.ant-card-body {
padding: 12px;
height: 100%;
display: flex;
flex-direction: column;
justify-content: space-between;
align-items: center;
.ant-typography {
font-size: 12px;
font-weight: 600;
}
}
&:hover {
transform: scale(1.05);
border: 1px solid var(--bg-robin-400);
}
`;
export const Text = styled(Typography)`
text-align: center;
margin-top: 1rem;
`;

View File

@@ -182,7 +182,9 @@ describe('Dashboard landing page actions header tests', () => {
(useLocation as jest.Mock).mockReturnValue(mockLocation);
const mockContextValue: IDashboardContext = {
isDashboardSliderOpen: false,
isDashboardLocked: false,
handleToggleDashboardSlider: jest.fn(),
handleDashboardLockToggle: jest.fn(),
dashboardResponse: {} as IDashboardContext['dashboardResponse'],
selectedDashboard: (getDashboardById.data as unknown) as Dashboard,

View File

@@ -40,7 +40,6 @@ import {
} from 'lucide-react';
import { useAppContext } from 'providers/App/App';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { usePanelTypeSelectionModalStore } from 'providers/Dashboard/helpers/panelTypeSelectionModalHelper';
import { sortLayout } from 'providers/Dashboard/util';
import { DashboardData } from 'types/api/dashboard/getAll';
import { Props } from 'types/api/dashboard/update';
@@ -49,10 +48,10 @@ import { ComponentTypes } from 'utils/permission';
import { v4 as uuid } from 'uuid';
import DashboardHeader from '../components/DashboardHeader/DashboardHeader';
import DashboardGraphSlider from '../ComponentsSlider';
import DashboardSettings from '../DashboardSettings';
import { Base64Icons } from '../DashboardSettings/General/utils';
import DashboardVariableSelection from '../DashboardVariablesSelection';
import PanelTypeSelectionModal from '../PanelTypeSelectionModal';
import SettingsDrawer from './SettingsDrawer';
import { VariablesSettingsTab } from './types';
import {
@@ -70,9 +69,6 @@ interface DashboardDescriptionProps {
// eslint-disable-next-line sonarjs/cognitive-complexity
function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
const { handle } = props;
const setIsPanelTypeSelectionModalOpen = usePanelTypeSelectionModalStore(
(s) => s.setIsPanelTypeSelectionModalOpen,
);
const {
selectedDashboard,
panelMap,
@@ -81,6 +77,7 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
setLayouts,
isDashboardLocked,
setSelectedDashboard,
handleToggleDashboardSlider,
handleDashboardLockToggle,
} = useDashboard();
@@ -148,14 +145,14 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
const [addPanelPermission] = useComponentPermission(permissions, userRole);
const onEmptyWidgetHandler = useCallback(() => {
setIsPanelTypeSelectionModalOpen(true);
handleToggleDashboardSlider(true);
logEvent('Dashboard Detail: Add new panel clicked', {
dashboardId: selectedDashboard?.id,
dashboardName: selectedDashboard?.data.title,
numberOfPanels: selectedDashboard?.data.widgets?.length,
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [setIsPanelTypeSelectionModalOpen]);
}, [handleToggleDashboardSlider]);
const handleLockDashboardToggle = (): void => {
setIsDashbordSettingsOpen(false);
@@ -524,7 +521,7 @@ function DashboardDescription(props: DashboardDescriptionProps): JSX.Element {
<DashboardVariableSelection />
</section>
)}
<PanelTypeSelectionModal />
<DashboardGraphSlider />
<Modal
open={isRenameDashboardOpen}

View File

@@ -1,68 +0,0 @@
import { memo } from 'react';
import { Card, Modal, Typography } from 'antd';
import logEvent from 'api/common/logEvent';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES, PANEL_TYPES_INITIAL_QUERY } from 'constants/queryBuilder';
import createQueryParams from 'lib/createQueryParams';
import history from 'lib/history';
import { usePanelTypeSelectionModalStore } from 'providers/Dashboard/helpers/panelTypeSelectionModalHelper';
import { v4 as uuid } from 'uuid';
import { PanelTypesWithData } from './menuItems';
import './PanelTypeSelectionModal.styles.scss';
function PanelTypeSelectionModal(): JSX.Element {
const {
isPanelTypeSelectionModalOpen,
setIsPanelTypeSelectionModalOpen,
} = usePanelTypeSelectionModalStore();
const onClickHandler = (name: PANEL_TYPES) => (): void => {
const id = uuid();
setIsPanelTypeSelectionModalOpen(false);
logEvent('Dashboard Detail: New panel type selected', {
panelType: name,
widgetId: id,
});
const queryParams = {
graphType: name,
widgetId: id,
[QueryParams.compositeQuery]: JSON.stringify(
PANEL_TYPES_INITIAL_QUERY[name],
),
};
history.push(
`${history.location.pathname}/new?${createQueryParams(queryParams)}`,
);
};
const handleCardClick = (panelType: PANEL_TYPES): void => {
onClickHandler(panelType)();
};
return (
<Modal
open={isPanelTypeSelectionModalOpen}
onCancel={(): void => {
setIsPanelTypeSelectionModalOpen(false);
}}
rootClassName="panel-type-selection-modal"
footer={null}
title="New Panel"
>
<div className="panel-selection">
{PanelTypesWithData.map(({ name, icon, display }) => (
<Card onClick={(): void => handleCardClick(name)} id={name} key={name}>
{icon}
<Typography className="panel-type-text">{display}</Typography>
</Card>
))}
</div>
</Modal>
);
}
export default memo(PanelTypeSelectionModal);

View File

@@ -9,18 +9,17 @@ import DashboardSettings from 'container/DashboardContainer/DashboardSettings';
import useComponentPermission from 'hooks/useComponentPermission';
import { useAppContext } from 'providers/App/App';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { usePanelTypeSelectionModalStore } from 'providers/Dashboard/helpers/panelTypeSelectionModalHelper';
import { ROLES, USER_ROLES } from 'types/roles';
import { ComponentTypes } from 'utils/permission';
import './DashboardEmptyState.styles.scss';
export default function DashboardEmptyState(): JSX.Element {
const setIsPanelTypeSelectionModalOpen = usePanelTypeSelectionModalStore(
(s) => s.setIsPanelTypeSelectionModalOpen,
);
const { selectedDashboard, isDashboardLocked } = useDashboard();
const {
selectedDashboard,
isDashboardLocked,
handleToggleDashboardSlider,
} = useDashboard();
const variablesSettingsTabHandle = useRef<VariablesSettingsTab>(null);
const [isSettingsDrawerOpen, setIsSettingsDrawerOpen] = useState<boolean>(
@@ -42,14 +41,14 @@ export default function DashboardEmptyState(): JSX.Element {
const [addPanelPermission] = useComponentPermission(permissions, userRole);
const onEmptyWidgetHandler = useCallback(() => {
setIsPanelTypeSelectionModalOpen(true);
handleToggleDashboardSlider(true);
logEvent('Dashboard Detail: Add new panel clicked', {
dashboardId: selectedDashboard?.id,
dashboardName: selectedDashboard?.data.title,
numberOfPanels: selectedDashboard?.data.widgets?.length,
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [setIsPanelTypeSelectionModalOpen]);
}, [handleToggleDashboardSlider]);
const onConfigureClick = useCallback((): void => {
setIsSettingsDrawerOpen(true);

View File

@@ -2,7 +2,7 @@ import { useCallback } from 'react';
import { Select, Typography } from 'antd';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { PanelTypesWithData } from 'container/DashboardContainer/PanelTypeSelectionModal/menuItems';
import GraphTypes from 'container/DashboardContainer/ComponentsSlider/menuItems';
import { handleQueryChange } from 'container/NewWidget/utils';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { Query } from 'types/api/queryBuilder/queryBuilderData';
@@ -59,7 +59,7 @@ function PanelTypeSelector({
data-testid="panel-change-select"
disabled={disabled}
>
{PanelTypesWithData.map((item) => (
{GraphTypes.map((item) => (
<Option key={item.name} value={item.name}>
<div className="view-panel-select-option">
<div className="icon">{item.icon}</div>

View File

@@ -5,7 +5,6 @@ import useComponentPermission from 'hooks/useComponentPermission';
import { EllipsisIcon, PenLine, Plus, X } from 'lucide-react';
import { useAppContext } from 'providers/App/App';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { usePanelTypeSelectionModalStore } from 'providers/Dashboard/helpers/panelTypeSelectionModalHelper';
import { setSelectedRowWidgetId } from 'providers/Dashboard/helpers/selectedRowWidgetIdHelper';
import { ROLES, USER_ROLES } from 'types/roles';
import { ComponentTypes } from 'utils/permission';
@@ -35,11 +34,11 @@ export function WidgetRowHeader(props: WidgetRowHeaderProps): JSX.Element {
} = props;
const [isRowSettingsOpen, setIsRowSettingsOpen] = useState<boolean>(false);
const setIsPanelTypeSelectionModalOpen = usePanelTypeSelectionModalStore(
(s) => s.setIsPanelTypeSelectionModalOpen,
);
const { selectedDashboard, isDashboardLocked } = useDashboard();
const {
handleToggleDashboardSlider,
selectedDashboard,
isDashboardLocked,
} = useDashboard();
const permissions: ComponentTypes[] = ['add_panel'];
const { user } = useAppContext();
@@ -88,7 +87,7 @@ export function WidgetRowHeader(props: WidgetRowHeaderProps): JSX.Element {
}
setSelectedRowWidgetId(selectedDashboard.id, id);
setIsPanelTypeSelectionModalOpen(true);
handleToggleDashboardSlider(true);
}}
>
New Panel

View File

@@ -15,7 +15,6 @@ import ROUTES from 'constants/routes';
import { getMetricsListQuery } from 'container/MetricsExplorer/Summary/utils';
import { useGetMetricsList } from 'hooks/metricsExplorer/useGetMetricsList';
import { useGetQueryRange } from 'hooks/queryBuilder/useGetQueryRange';
import { useIsDarkMode } from 'hooks/useDarkMode';
import history from 'lib/history';
import cloneDeep from 'lodash-es/cloneDeep';
import { AnimatePresence } from 'motion/react';
@@ -44,7 +43,6 @@ const homeInterval = 30 * 60 * 1000;
// eslint-disable-next-line sonarjs/cognitive-complexity
export default function Home(): JSX.Element {
const { user } = useAppContext();
const isDarkMode = useIsDarkMode();
const [startTime, setStartTime] = useState<number | null>(null);
const [endTime, setEndTime] = useState<number | null>(null);
@@ -682,11 +680,7 @@ export default function Home(): JSX.Element {
<div className="checklist-img-container">
<img
src={
isDarkMode
? '/Images/allInOne.svg'
: '/Images/allInOneLightMode.svg'
}
src="/Images/allInOne.svg"
alt="checklist-img"
className="checklist-img"
/>

View File

@@ -20,10 +20,9 @@ import {
import { PrecisionOption, PrecisionOptionsEnum } from 'components/Graph/types';
import TimePreference from 'components/TimePreferenceDropDown';
import { PANEL_TYPES, PanelDisplay } from 'constants/queryBuilder';
import {
import GraphTypes, {
ItemsProps,
PanelTypesWithData,
} from 'container/DashboardContainer/PanelTypeSelectionModal/menuItems';
} from 'container/DashboardContainer/ComponentsSlider/menuItems';
import { useDashboardVariables } from 'hooks/dashboard/useDashboardVariables';
import useCreateAlerts from 'hooks/queryBuilder/useCreateAlerts';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
@@ -151,7 +150,7 @@ function RightContainer({
);
const selectedGraphType =
PanelTypesWithData.find((e) => e.name === selectedGraph)?.display || '';
GraphTypes.find((e) => e.name === selectedGraph)?.display || '';
const onCreateAlertsHandler = useCreateAlerts(selectedWidget, 'panelView');
@@ -177,7 +176,7 @@ function RightContainer({
const { currentQuery } = useQueryBuilder();
const [graphTypes, setGraphTypes] = useState<ItemsProps[]>(PanelTypesWithData);
const [graphTypes, setGraphTypes] = useState<ItemsProps[]>(GraphTypes);
const dashboardVariableOptions = useMemo<VariableOption[]>(() => {
return Object.entries(dashboardVariables).map(([, value]) => ({
@@ -273,7 +272,7 @@ function RightContainer({
prev.filter((graph) => graph.name !== PANEL_TYPES.LIST),
);
} else {
setGraphTypes(PanelTypesWithData);
setGraphTypes(GraphTypes);
}
}, [currentQuery]);

View File

@@ -11,8 +11,11 @@ import { getYAxisCategories } from 'components/YAxisUnitSelector/utils';
import {
initialQueryBuilderFormValuesMap,
PANEL_TYPES,
PANEL_TYPES_INITIAL_QUERY,
} from 'constants/queryBuilder';
import {
listViewInitialLogQuery,
PANEL_TYPES_INITIAL_QUERY,
} from 'container/DashboardContainer/ComponentsSlider/constants';
import {
defaultLogsSelectedColumns,
defaultTraceSelectedColumns,
@@ -546,7 +549,10 @@ export const getDefaultWidgetData = (
nullZeroValues: '',
opacity: '',
panelTypes: name,
query: PANEL_TYPES_INITIAL_QUERY[name],
query:
name === PANEL_TYPES.LIST
? listViewInitialLogQuery
: PANEL_TYPES_INITIAL_QUERY[name],
timePreferance: 'GLOBAL_TIME',
softMax: null,
softMin: null,

View File

@@ -12,8 +12,6 @@ import {
ATTRIBUTE_TYPES,
initialAutocompleteData,
initialQueryBuilderFormValuesMap,
listViewInitialLogQuery,
listViewInitialTraceQuery,
mapOfFormulaToFilters,
mapOfQueryFilters,
PANEL_TYPES,
@@ -25,6 +23,10 @@ import {
metricsUnknownSpaceAggregateOperatorOptions,
metricsUnknownTimeAggregateOperatorOptions,
} from 'constants/queryBuilderOperators';
import {
listViewInitialLogQuery,
listViewInitialTraceQuery,
} from 'container/DashboardContainer/ComponentsSlider/constants';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { getMetricsOperatorsByAttributeType } from 'lib/newQueryBuilder/getMetricsOperatorsByAttributeType';
import { getOperatorsBySourceAndPanelType } from 'lib/newQueryBuilder/getOperatorsBySourceAndPanelType';

View File

@@ -53,7 +53,9 @@ import { IDashboardContext, WidgetColumnWidths } from './types';
import { sortLayout } from './util';
export const DashboardContext = createContext<IDashboardContext>({
isDashboardSliderOpen: false,
isDashboardLocked: false,
handleToggleDashboardSlider: () => {},
handleDashboardLockToggle: () => {},
dashboardResponse: {} as UseQueryResult<
SuccessResponseV2<Dashboard>,
@@ -80,6 +82,8 @@ export function DashboardProvider({
children,
dashboardId,
}: PropsWithChildren<{ dashboardId: string }>): JSX.Element {
const [isDashboardSliderOpen, setIsDashboardSlider] = useState<boolean>(false);
const [isDashboardLocked, setIsDashboardLocked] = useState<boolean>(false);
const [
@@ -284,8 +288,13 @@ export function DashboardProvider({
}
}, [isVisible]);
const handleToggleDashboardSlider = (value: boolean): void => {
setIsDashboardSlider(value);
};
const { mutate: lockDashboard } = useMutation(locked, {
onSuccess: (_, props) => {
setIsDashboardSlider(false);
setIsDashboardLocked(props.lock);
},
onError: (error) => {
@@ -310,7 +319,9 @@ export function DashboardProvider({
const value: IDashboardContext = useMemo(
() => ({
isDashboardSliderOpen,
isDashboardLocked,
handleToggleDashboardSlider,
handleDashboardLockToggle,
dashboardResponse,
selectedDashboard,
@@ -330,6 +341,7 @@ export function DashboardProvider({
}),
// eslint-disable-next-line react-hooks/exhaustive-deps
[
isDashboardSliderOpen,
isDashboardLocked,
dashboardResponse,
selectedDashboard,

View File

@@ -1,18 +0,0 @@
import { create } from 'zustand';
interface IPanelTypeSelectionModalState {
isPanelTypeSelectionModalOpen: boolean;
setIsPanelTypeSelectionModalOpen: (isOpen: boolean) => void;
}
/**
* This helper is used for selecting the panel type when creating a new panel in the dashboard.
* It uses Zustand for state management to keep track of whether the panel type selection modal is open or closed.
*/
export const usePanelTypeSelectionModalStore = create<IPanelTypeSelectionModalState>(
(set) => ({
isPanelTypeSelectionModalOpen: false,
setIsPanelTypeSelectionModalOpen: (isOpen): void =>
set({ isPanelTypeSelectionModalOpen: isOpen }),
}),
);

View File

@@ -9,7 +9,9 @@ export type WidgetColumnWidths = {
};
export interface IDashboardContext {
isDashboardSliderOpen: boolean;
isDashboardLocked: boolean;
handleToggleDashboardSlider: (value: boolean) => void;
handleDashboardLockToggle: (value: boolean) => void;
dashboardResponse: UseQueryResult<SuccessResponseV2<Dashboard>, unknown>;
selectedDashboard: Dashboard | undefined;

View File

@@ -18,7 +18,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/preference"
"github.com/SigNoz/signoz/pkg/modules/promote"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/session"
"github.com/SigNoz/signoz/pkg/modules/user"
@@ -48,7 +47,6 @@ type provider struct {
gatewayHandler gateway.Handler
fieldsHandler fields.Handler
authzHandler authz.Handler
rawDataExportHandler rawdataexport.Handler
zeusHandler zeus.Handler
querierHandler querier.Handler
serviceAccountHandler serviceaccount.Handler
@@ -71,7 +69,6 @@ func NewFactory(
gatewayHandler gateway.Handler,
fieldsHandler fields.Handler,
authzHandler authz.Handler,
rawDataExportHandler rawdataexport.Handler,
zeusHandler zeus.Handler,
querierHandler querier.Handler,
serviceAccountHandler serviceaccount.Handler,
@@ -97,7 +94,6 @@ func NewFactory(
gatewayHandler,
fieldsHandler,
authzHandler,
rawDataExportHandler,
zeusHandler,
querierHandler,
serviceAccountHandler,
@@ -125,7 +121,6 @@ func newProvider(
gatewayHandler gateway.Handler,
fieldsHandler fields.Handler,
authzHandler authz.Handler,
rawDataExportHandler rawdataexport.Handler,
zeusHandler zeus.Handler,
querierHandler querier.Handler,
serviceAccountHandler serviceaccount.Handler,
@@ -151,7 +146,6 @@ func newProvider(
gatewayHandler: gatewayHandler,
fieldsHandler: fieldsHandler,
authzHandler: authzHandler,
rawDataExportHandler: rawDataExportHandler,
zeusHandler: zeusHandler,
querierHandler: querierHandler,
serviceAccountHandler: serviceAccountHandler,
@@ -227,10 +221,6 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
return err
}
if err := provider.addRawDataExportRoutes(router); err != nil {
return err
}
if err := provider.addZeusRoutes(router); err != nil {
return err
}

View File

@@ -1,47 +0,0 @@
package signozapiserver
import (
"net/http"
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/exporttypes"
v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/gorilla/mux"
)
func (provider *provider) addRawDataExportRoutes(router *mux.Router) error {
if err := router.Handle("/api/v1/export_raw_data", handler.New(provider.authZ.ViewAccess(provider.rawDataExportHandler.ExportRawData), handler.OpenAPIDef{
ID: "HandleExportRawDataGET",
Tags: []string{"logs", "traces"},
Summary: "Export raw data",
Description: "This endpoints allows simple query exporting raw data for traces and logs",
RequestQuery: new(exporttypes.ExportRawDataQueryParams),
RequestContentType: "",
Response: nil,
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest},
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/export_raw_data", handler.New(provider.authZ.ViewAccess(provider.rawDataExportHandler.ExportRawData), handler.OpenAPIDef{
ID: "HandleExportRawDataPOST",
Tags: []string{"logs", "traces"},
Summary: "Export raw data",
Description: "This endpoints allows complex query exporting raw data for traces and logs",
Request: new(v5.QueryRangeRequest),
RequestQuery: new(exporttypes.ExportRawDataFormatQueryParam),
RequestContentType: "application/json",
Response: nil,
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest},
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,92 @@
package cloudintegration
import (
"context"
"net/http"
"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Module interface {
// CreateConnectionArtifact generates cloud-provider-specific connection information;
// the client side decides how this information is shown.
CreateConnectionArtifact(
ctx context.Context,
orgID valuer.UUID,
provider cloudintegrationtypes.CloudProviderType,
request *cloudintegrationtypes.ConnectionArtifactRequest,
) (*cloudintegrationtypes.ConnectionArtifact, error)
// GetAccountStatus returns the agent connection status for a cloud integration account.
GetAccountStatus(ctx context.Context, orgID, accountID valuer.UUID) (*cloudintegrationtypes.AccountStatus, error)
// ListConnectedAccounts lists the accounts where an agent is connected.
ListConnectedAccounts(ctx context.Context, orgID valuer.UUID) (*cloudintegrationtypes.ConnectedAccounts, error)
// DisconnectAccount soft-deletes (removes) a cloud integration account.
DisconnectAccount(ctx context.Context, orgID, accountID valuer.UUID) error
// UpdateAccountConfig updates the configuration of an existing cloud account for a specific organization.
UpdateAccountConfig(
ctx context.Context,
orgId,
accountId valuer.UUID,
config *cloudintegrationtypes.UpdateAccountConfigRequest,
) (*cloudintegrationtypes.Account, error)
// ListServicesSummary returns the list of services for a cloud provider attached to the accountID.
// It returns only a summary of each service, not the whole service definition.
ListServicesSummary(ctx context.Context, orgID valuer.UUID, accountID *valuer.UUID) (*cloudintegrationtypes.ServicesSummary, error)
// GetService returns service definition details for a serviceId, including the config and
// other details required to render the service details page on the web client.
GetService(ctx context.Context, orgID valuer.UUID, serviceID string, accountID *valuer.UUID) (*cloudintegrationtypes.Service, error)
// UpdateServiceConfig updates the cloud integration service config.
UpdateServiceConfig(
ctx context.Context,
serviceId string,
orgID valuer.UUID,
config *cloudintegrationtypes.UpdateServiceConfigRequest,
) (*cloudintegrationtypes.ServiceSummary, error)
// AgentCheckIn is called by the agent to send a heartbeat and receive the latest config in response.
AgentCheckIn(
ctx context.Context,
orgID valuer.UUID,
req *cloudintegrationtypes.AgentCheckInRequest,
) (cloudintegrationtypes.AgentCheckInResponse, error)
// GetDashboardByID returns the dashboard JSON for a given dashboard id.
// It only returns the dashboard when the service (embedded in the dashboard id) is enabled for the org.
GetDashboardByID(ctx context.Context, id string, orgID valuer.UUID) (*dashboardtypes.Dashboard, error)
// GetAllDashboards returns the list of dashboards across all connected cloud integration accounts
// and enabled services in the org. This list is added to the dashboard list page.
GetAllDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
}
// CloudProvider is the interface each cloud provider must implement.
type CloudProvider interface {
CreateArtifact(
ctx context.Context,
orgID valuer.UUID,
request *cloudintegrationtypes.ConnectionArtifactRequest,
creds cloudintegrationtypes.SignozCredentials,
accountID valuer.UUID,
) (artifact *cloudintegrationtypes.ConnectionArtifact, err error)
}
type Handler interface {
AgentCheckIn(http.ResponseWriter, *http.Request)
GenerateConnectionArtifact(http.ResponseWriter, *http.Request)
ListConnectedAccounts(http.ResponseWriter, *http.Request)
GetAccountStatus(http.ResponseWriter, *http.Request)
ListServices(http.ResponseWriter, *http.Request)
GetServiceDetails(http.ResponseWriter, *http.Request)
UpdateAccountConfig(http.ResponseWriter, *http.Request)
UpdateServiceConfig(http.ResponseWriter, *http.Request)
DisconnectAccount(http.ResponseWriter, *http.Request)
}
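
One illustrative way a concrete Handler can delegate to the Module, assuming the authtypes, render, and valuer helpers used elsewhere in this diff; handlerSketch and its plain JSON encoding are hypothetical, not the shipped enterprise implementation:

package implcloudintegration

import (
	"encoding/json"
	"net/http"

	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// handlerSketch is hypothetical: it resolves the org from the request
// claims and hands the call off to the Module.
type handlerSketch struct {
	module cloudintegration.Module
}

func (h *handlerSketch) ListConnectedAccounts(rw http.ResponseWriter, r *http.Request) {
	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
		render.Error(rw, err)
		return
	}
	orgID, err := valuer.NewUUID(claims.OrgID)
	if err != nil {
		render.Error(rw, err)
		return
	}
	accounts, err := h.module.ListConnectedAccounts(r.Context(), orgID)
	if err != nil {
		render.Error(rw, err)
		return
	}
	rw.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(rw).Encode(accounts) // plain encode; the real handler may use a render helper
}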

View File

@@ -0,0 +1,61 @@
package implcloudintegration
import (
"context"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type module struct{}
func NewModule() cloudintegration.Module {
return &module{}
}
func (m *module) CreateConnectionArtifact(_ context.Context, _ valuer.UUID, _ cloudintegrationtypes.CloudProviderType, _ *cloudintegrationtypes.ConnectionArtifactRequest) (*cloudintegrationtypes.ConnectionArtifact, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) GetAccountStatus(_ context.Context, _, _ valuer.UUID) (*cloudintegrationtypes.AccountStatus, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) ListConnectedAccounts(_ context.Context, _ valuer.UUID) (*cloudintegrationtypes.ConnectedAccounts, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) DisconnectAccount(_ context.Context, _, _ valuer.UUID) error {
return errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) UpdateAccountConfig(_ context.Context, _, _ valuer.UUID, _ *cloudintegrationtypes.UpdateAccountConfigRequest) (*cloudintegrationtypes.Account, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) ListServicesSummary(_ context.Context, _ valuer.UUID, _ *valuer.UUID) (*cloudintegrationtypes.ServicesSummary, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) GetService(_ context.Context, _ valuer.UUID, _ string, _ *valuer.UUID) (*cloudintegrationtypes.Service, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) UpdateServiceConfig(_ context.Context, _ string, _ valuer.UUID, _ *cloudintegrationtypes.UpdateServiceConfigRequest) (*cloudintegrationtypes.ServiceSummary, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) AgentCheckIn(_ context.Context, _ valuer.UUID, _ *cloudintegrationtypes.AgentCheckInRequest) (cloudintegrationtypes.AgentCheckInResponse, error) {
return cloudintegrationtypes.AgentCheckInResponse{}, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) GetDashboardByID(_ context.Context, _ string, _ valuer.UUID) (*dashboardtypes.Dashboard, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
func (m *module) GetAllDashboards(_ context.Context, _ valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
return nil, errors.New(errors.TypeUnsupported, errors.CodeUnsupported, "cloud integration is an enterprise feature")
}
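
Since these stubs exist only to satisfy the interface in the community build, a compile-time assertion is one conventional way to keep the stub in sync with cloudintegration.Module (optional; the codebase may deliberately omit such checks):

// Optional compile-time check: fails the build if the stub falls behind
// the cloudintegration.Module interface.
var _ cloudintegration.Module = (*module)(nil)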

View File

@@ -0,0 +1,118 @@
package implcloudintegration
import (
"context"
"time"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type store struct {
store sqlstore.SQLStore
}
func NewStore(sqlStore sqlstore.SQLStore) cloudintegrationtypes.Store {
return &store{store: sqlStore}
}
func (s *store) GetAccountByID(ctx context.Context, orgID, id valuer.UUID, provider cloudintegrationtypes.CloudProviderType) (*cloudintegrationtypes.StorableCloudIntegration, error) {
account := new(cloudintegrationtypes.StorableCloudIntegration)
err := s.store.BunDB().NewSelect().Model(account).
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Scan(ctx)
if err != nil {
return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "cloud integration account with id %s not found", id)
}
return account, nil
}
func (s *store) UpsertAccount(ctx context.Context, account *cloudintegrationtypes.StorableCloudIntegration) error {
account.UpdatedAt = time.Now()
_, err := s.store.BunDBCtx(ctx).NewInsert().Model(account).
On("CONFLICT (id, provider, org_id) DO UPDATE").
Set("config = EXCLUDED.config").
Set("account_id = EXCLUDED.account_id").
Set("last_agent_report = EXCLUDED.last_agent_report").
Set("removed_at = EXCLUDED.removed_at").
Set("updated_at = EXCLUDED.updated_at").
Exec(ctx)
return err
}
func (s *store) RemoveAccount(ctx context.Context, orgID, id valuer.UUID, provider cloudintegrationtypes.CloudProviderType) error {
_, err := s.store.BunDBCtx(ctx).NewUpdate().Model((*cloudintegrationtypes.StorableCloudIntegration)(nil)).
Set("removed_at = ?", time.Now()).
Where("id = ?", id).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Exec(ctx)
return err
}
func (s *store) GetConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) ([]*cloudintegrationtypes.StorableCloudIntegration, error) {
var accounts []*cloudintegrationtypes.StorableCloudIntegration
err := s.store.BunDB().NewSelect().Model(&accounts).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Where("removed_at IS NULL").
Where("account_id IS NOT NULL").
Where("last_agent_report IS NOT NULL").
Order("created_at ASC").
Scan(ctx)
if err != nil {
return nil, err
}
return accounts, nil
}
func (s *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
account := new(cloudintegrationtypes.StorableCloudIntegration)
err := s.store.BunDB().NewSelect().Model(account).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Where("account_id = ?", providerAccountID).
Where("last_agent_report IS NOT NULL").
Where("removed_at IS NULL").
Scan(ctx)
if err != nil {
return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
}
return account, nil
}
func (s *store) GetServiceByType(ctx context.Context, cloudIntegrationID valuer.UUID, serviceType string) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
service := new(cloudintegrationtypes.StorableCloudIntegrationService)
err := s.store.BunDB().NewSelect().Model(service).
Where("cloud_integration_id = ?", cloudIntegrationID).
Where("type = ?", serviceType).
Scan(ctx)
if err != nil {
return nil, s.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "cloud integration service with type %s not found", serviceType)
}
return service, nil
}
func (s *store) UpsertService(ctx context.Context, service *cloudintegrationtypes.StorableCloudIntegrationService) error {
service.UpdatedAt = time.Now()
_, err := s.store.BunDBCtx(ctx).NewInsert().Model(service).
On("CONFLICT (cloud_integration_id, type) DO UPDATE").
Set("config = EXCLUDED.config").
Set("updated_at = EXCLUDED.updated_at").
Exec(ctx)
return err
}
func (s *store) GetServices(ctx context.Context, cloudIntegrationID valuer.UUID) ([]*cloudintegrationtypes.StorableCloudIntegrationService, error) {
var services []*cloudintegrationtypes.StorableCloudIntegrationService
err := s.store.BunDB().NewSelect().Model(&services).
Where("cloud_integration_id = ?", cloudIntegrationID).
Scan(ctx)
if err != nil {
return nil, err
}
return services, nil
}
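
A minimal usage sketch, assuming the cloudintegrationtypes.Store interface declares UpsertAccount as implemented above:

// Hypothetical caller: UpsertAccount stamps UpdatedAt and resolves
// conflicts on the (id, provider, org_id) unique constraint.
func upsertExample(ctx context.Context, sqlStore sqlstore.SQLStore, account *cloudintegrationtypes.StorableCloudIntegration) error {
	st := NewStore(sqlStore)
	return st.UpsertAccount(ctx, account)
}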

View File

@@ -6,18 +6,18 @@ import (
"fmt"
"io"
"net/http"
"slices"
"net/url"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/binding"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/exporttypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
@@ -31,51 +31,129 @@ func NewHandler(module rawdataexport.Module) rawdataexport.Handler {
return &handler{module: module}
}
// ExportRawData handles data export requests.
//
// API Documentation:
// Endpoint: GET /api/v1/export_raw_data
//
// Query Parameters:
//
// - source (optional): Type of data to export ["logs" (default), "metrics", "traces"]
// Note: Currently only "logs" is fully supported
//
// - format (optional): Output format ["csv" (default), "jsonl"]
//
// - start (required): Start time for query (Unix timestamp in nanoseconds)
//
// - end (required): End time for query (Unix timestamp in nanoseconds)
//
// - limit (optional): Maximum number of rows to export
// Constraints: Must be positive and cannot exceed MAX_EXPORT_ROW_COUNT_LIMIT
//
// - filter (optional): Filter expression to apply to the query
//
// - columns (optional): Specific columns to include in export
// Default: all columns are returned
// Format: ["context.field:type", "context.field", "field"]
//
// - order_by (optional): Sorting specification ["column:direction" or "context.field:type:direction"]
// Direction: "asc" or "desc"
// Default: ["timestamp:desc", "id:desc"]
//
// Response Headers:
// - Content-Type: "text/csv" or "application/x-ndjson"
// - Content-Encoding: "gzip" (handled by HTTP middleware)
// - Content-Disposition: "attachment; filename=\"data_exported.[format]\""
// - Cache-Control: "no-cache"
// - Vary: "Accept-Encoding"
// - Transfer-Encoding: "chunked"
// - Trailer: X-Response-Complete
//
// Response Format:
//
// CSV: Headers in first row, data in subsequent rows
// JSONL: One JSON object per line
//
// Example Usage:
//
// Basic CSV export:
// GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000
//
// Export with columns and format:
// GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000&format=jsonl
// &columns=timestamp&columns=severity&columns=message
//
// Export with filter and ordering:
// GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000
// &filter=severity="error"&order_by=timestamp:desc&limit=1000
func (handler *handler) ExportRawData(rw http.ResponseWriter, r *http.Request) {
var queryRangeRequest qbtypes.QueryRangeRequest
var format string
source, err := getExportQuerySource(r.URL.Query())
if err != nil {
render.Error(rw, err)
return
}
switch r.Method {
case http.MethodGet:
var params exporttypes.ExportRawDataQueryParams
if err := binding.Query.BindQuery(r.URL.Query(), &params); err != nil {
render.Error(rw, err)
return
}
params.Normalize()
if err := params.Validate(); err != nil {
render.Error(rw, err)
return
}
format = params.Format
queryRangeRequest = buildQueryRangeRequest(&params)
case http.MethodPost:
var formatParam exporttypes.ExportRawDataFormatQueryParam
if err := binding.Query.BindQuery(r.URL.Query(), &formatParam); err != nil {
render.Error(rw, err)
return
}
format = formatParam.Format
if err := json.NewDecoder(r.Body).Decode(&queryRangeRequest); err != nil {
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid request body: %v", err))
return
}
switch source {
case "logs":
handler.exportLogs(rw, r)
case "traces":
handler.exportTraces(rw, r)
case "metrics":
handler.exportMetrics(rw, r)
default:
render.Error(rw, errors.Newf(errors.TypeMethodNotAllowed, errors.CodeMethodNotAllowed, "method not allowed, only GET/POST supported"))
return
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid source: must be logs"))
}
}
if err := validateSpecForExport(&queryRangeRequest); err != nil {
func (handler *handler) exportMetrics(rw http.ResponseWriter, r *http.Request) {
render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "metrics export is not yet supported"))
}
func (handler *handler) exportTraces(rw http.ResponseWriter, r *http.Request) {
render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "traces export is not yet supported"))
}
func (handler *handler) exportLogs(rw http.ResponseWriter, r *http.Request) {
// Set up response headers
rw.Header().Set("Cache-Control", "no-cache")
rw.Header().Set("Vary", "Accept-Encoding") // Indicate that response varies based on Accept-Encoding
rw.Header().Set("Access-Control-Expose-Headers", "Content-Disposition, X-Response-Complete")
rw.Header().Set("Trailer", "X-Response-Complete")
rw.Header().Set("Transfer-Encoding", "chunked")
queryParams := r.URL.Query()
startTime, endTime, err := getExportQueryTimeRange(queryParams)
if err != nil {
render.Error(rw, err)
return
}
if err := validateAndApplyDefaultExportLimits(queryRangeRequest.CompositeQuery.Queries); err != nil {
limit, err := getExportQueryLimit(queryParams)
if err != nil {
render.Error(rw, err)
return
}
queryRangeRequest.UseDefaultOrderBy()
format, err := getExportQueryFormat(queryParams)
if err != nil {
render.Error(rw, err)
return
}
// Set appropriate content type and filename
filename := fmt.Sprintf("data_exported_%s.%s", time.Now().Format("2006-01-02_150405"), format)
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
filterExpression := queryParams.Get("filter")
orderByExpression, err := getExportQueryOrderBy(queryParams)
if err != nil {
render.Error(rw, err)
return
}
columns := getExportQueryColumns(queryParams)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
@@ -83,136 +161,76 @@ func (handler *handler) ExportRawData(rw http.ResponseWriter, r *http.Request) {
return
}
orgID := valuer.MustNewUUID(claims.OrgID)
orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "orgID is invalid"))
return
}
setExportResponseHeaders(rw, format)
queryRangeRequest := qbtypes.QueryRangeRequest{
Start: startTime,
End: endTime,
RequestType: qbtypes.RequestTypeRaw,
CompositeQuery: qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{
{
Type: qbtypes.QueryTypeBuilder,
Spec: nil,
},
},
},
}
spec := qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
Name: "raw",
Filter: &qbtypes.Filter{
Expression: filterExpression,
},
Limit: limit,
Order: orderByExpression,
}
spec.SelectFields = columns
queryRangeRequest.CompositeQuery.Queries[0].Spec = spec
// Closing doneChan signals the export module to stop sending data.
doneChan := make(chan any)
defer close(doneChan)
rowChan, errChan := handler.module.ExportRawData(r.Context(), orgID, &queryRangeRequest, doneChan)
isComplete, err := handler.executeExport(rowChan, errChan, format, rw)
if err != nil {
render.Error(rw, err)
return
}
rw.Header().Set("X-Response-Complete", strconv.FormatBool(isComplete))
}
var isComplete bool
// validateSpecForExport validates query specs
func validateSpecForExport(req *qbtypes.QueryRangeRequest) error {
queries := req.CompositeQuery.Queries
// If the trace operator query is not present, and there are multiple queries, return an error
if req.TraceOperatorQueryIndex() == -1 && len(queries) > 1 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "multiple queries not allowed without a trace operator query")
}
for idx := range queries {
switch spec := queries[idx].Spec.(type) {
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation],
qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation],
qbtypes.QueryBuilderTraceOperator:
// Supported spec types
default:
return errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported query at index %d type: %T", idx, spec)
}
}
err := req.Validate(qbtypes.WithSkipLimitOffsetValidation())
if err != nil {
return err
}
return nil
}
func validateAndApplyDefaultExportLimits(queries []qbtypes.QueryEnvelope) error {
for idx := range queries {
limit := queries[idx].GetLimit()
if limit == 0 {
limit = DefaultExportRowCountLimit
} else if limit < 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be positive")
} else if limit > MaxExportRowCountLimit {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit cannot be more than %d", MaxExportRowCountLimit)
}
queries[idx].SetLimit(limit)
}
return nil
}
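
In effect, a zero limit becomes DefaultExportRowCountLimit, a positive limit within bounds is kept, and anything negative or above MaxExportRowCountLimit is rejected. A small sketch of the default path:

// Sketch: a zero limit is replaced with the default.
func limitDefaultsExample() error {
	qs := []qbtypes.QueryEnvelope{{
		Type: qbtypes.QueryTypeBuilder,
		Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{Limit: 0},
	}}
	if err := validateAndApplyDefaultExportLimits(qs); err != nil {
		return err
	}
	// qs[0].GetLimit() is now DefaultExportRowCountLimit
	return nil
}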
// buildQueryEnvelope creates a QueryEnvelope with a QueryBuilderQuery for the given aggregation type.
func buildQueryEnvelope[T any](signal telemetrytypes.Signal, filter *qbtypes.Filter, limit int, order []qbtypes.OrderBy, selectFields []telemetrytypes.TelemetryFieldKey) qbtypes.QueryEnvelope {
return qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[T]{
Signal: signal,
Filter: filter,
Limit: limit,
Order: order,
SelectFields: selectFields,
},
}
}
// buildQueryRangeRequest builds a QueryRangeRequest from already-bound and validated GET query params.
func buildQueryRangeRequest(params *exporttypes.ExportRawDataQueryParams) qbtypes.QueryRangeRequest {
var query qbtypes.QueryEnvelope
switch params.Signal {
case telemetrytypes.SignalLogs:
query = buildQueryEnvelope[qbtypes.LogAggregation](params.Signal, &params.Filter, params.Limit, params.Order, params.SelectFields)
case telemetrytypes.SignalTraces:
query = buildQueryEnvelope[qbtypes.TraceAggregation](params.Signal, &params.Filter, params.Limit, params.Order, params.SelectFields)
}
return qbtypes.QueryRangeRequest{
Start: params.Start,
End: params.End,
RequestType: qbtypes.RequestTypeRaw,
CompositeQuery: qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{query},
},
}
}
// setExportResponseHeaders sets common HTTP headers for export responses.
func setExportResponseHeaders(rw http.ResponseWriter, format string) {
rw.Header().Set("Cache-Control", "no-cache")
rw.Header().Set("Vary", "Accept-Encoding")
rw.Header().Set("Access-Control-Expose-Headers", "Content-Disposition, X-Response-Complete")
rw.Header().Set("Trailer", "X-Response-Complete")
rw.Header().Set("Transfer-Encoding", "chunked")
filename := fmt.Sprintf("data_exported_%s.%s", time.Now().Format("2006-01-02_150405"), format)
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
}
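
Because X-Response-Complete is declared as a trailer, an HTTP client only sees it after draining the body. A minimal client sketch; the host, port, and time range are placeholders, and it requires "io", "net/http", and "os" imports:

func fetchExport() (bool, error) {
	resp, err := http.Get("http://localhost:8080/api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000")
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	// Trailers are only populated after the body is read to EOF.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		return false, err
	}
	return resp.Trailer.Get("X-Response-Complete") == "true", nil
}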
// executeExport streams data from rowChan to the response writer in the specified format.
func (handler *handler) executeExport(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, format string, rw http.ResponseWriter) (bool, error) {
switch format {
case "csv", "":
rw.Header().Set("Content-Type", "text/csv")
csvWriter := csv.NewWriter(rw)
isComplete, err := handler.exportRawDataCSV(rowChan, errChan, csvWriter)
isComplete, err = handler.exportLogsCSV(rowChan, errChan, csvWriter)
if err != nil {
return false, err
render.Error(rw, err)
return
}
csvWriter.Flush()
return isComplete, nil
case "jsonl":
rw.Header().Set("Content-Type", "application/x-ndjson")
return handler.exportRawDataJSONL(rowChan, errChan, rw)
isComplete, err = handler.exportLogsJSONL(rowChan, errChan, rw)
if err != nil {
render.Error(rw, err)
return
}
default:
return false, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl")
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl"))
return
}
rw.Header().Set("X-Response-Complete", strconv.FormatBool(isComplete))
}
// exportRawDataCSV is a generic CSV export function that works with any raw data (logs, traces, etc.)
func (handler *handler) exportRawDataCSV(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, csvWriter *csv.Writer) (bool, error) {
func (handler *handler) exportLogsCSV(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, csvWriter *csv.Writer) (bool, error) {
var header []string
headerToIndexMapping := make(map[string]int)
headerToIndexMapping := make(map[string]int, len(header))
totalBytes := uint64(0)
for {
@@ -250,8 +268,8 @@ func (handler *handler) exportRawDataCSV(rowChan <-chan *qbtypes.RawRow, errChan
}
}
// exportRawDataJSONL is a generic JSONL export function that works with any raw data (logs, traces, etc.)
func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, writer io.Writer) (bool, error) {
func (handler *handler) exportLogsJSONL(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, writer io.Writer) (bool, error) {
totalBytes := uint64(0)
for {
select {
@@ -259,11 +277,9 @@ func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errCh
if !ok {
return true, nil
}
jsonBytes, err := json.Marshal(row.Data)
if err != nil {
return false, errors.NewUnexpectedf(errors.CodeInternal, "error marshaling JSON: %s", err)
}
totalBytes += uint64(len(jsonBytes)) + 1
// Handle JSON format (JSONL - one object per line)
jsonBytes, _ := json.Marshal(row.Data)
totalBytes += uint64(len(jsonBytes)) + 1 // +1 for newline
if _, err := writer.Write(jsonBytes); err != nil {
return false, errors.NewUnexpectedf(errors.CodeInternal, "error writing JSON: %s", err)
@@ -283,33 +299,74 @@ func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errCh
}
}
// priorityColumns defines the columns that should appear first in the CSV output, in order.
var priorityColumns = []string{"timestamp", "id"}
func getExportQuerySource(queryParams url.Values) (string, error) {
switch queryParams.Get("source") {
case "logs", "":
return "logs", nil
case "metrics":
return "metrics", errors.NewInvalidInputf(errors.CodeInvalidInput, "metrics export not yet supported")
case "traces":
return "traces", errors.NewInvalidInputf(errors.CodeInvalidInput, "traces export not yet supported")
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid source: must be logs, metrics or traces")
}
}
func getExportQueryFormat(queryParams url.Values) (string, error) {
switch queryParams.Get("format") {
case "csv", "":
return "csv", nil
case "jsonl":
return "jsonl", nil
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl")
}
}
func getExportQueryLimit(queryParams url.Values) (int, error) {
limitStr := queryParams.Get("limit")
if limitStr == "" {
	return DefaultExportRowCountLimit, nil
}
limit, err := strconv.Atoi(limitStr)
if err != nil {
	return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid limit format: %s", err.Error())
}
if limit <= 0 {
	return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be positive")
}
if limit > MaxExportRowCountLimit {
	return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "limit cannot be more than %d", MaxExportRowCountLimit)
}
return limit, nil
}
func getExportQueryTimeRange(queryParams url.Values) (uint64, uint64, error) {
startTimeStr := queryParams.Get("start")
endTimeStr := queryParams.Get("end")
if startTimeStr == "" || endTimeStr == "" {
return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "start and end time are required")
}
startTime, err := strconv.ParseUint(startTimeStr, 10, 64)
if err != nil {
return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid start time format: %s", err.Error())
}
endTime, err := strconv.ParseUint(endTimeStr, 10, 64)
if err != nil {
return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid end time format: %s", err.Error())
}
return startTime, endTime, nil
}
func constructCSVHeaderFromQueryResponse(data map[string]any) []string {
header := make([]string, 0, len(data))
for key := range data {
header = append(header, key)
}
// This ensures the CSV header order is deterministic across chunked queries, since map iteration order is not guaranteed.
slices.SortFunc(header, func(a, b string) int {
ai, bi := slices.Index(priorityColumns, a), slices.Index(priorityColumns, b)
switch {
case ai != -1 && bi != -1:
return ai - bi
case ai != -1:
return -1
case bi != -1:
return 1
default:
if a < b {
return -1
} else if a > b {
return 1
}
return 0
}
})
return header
}
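
A worked example of the resulting order, matching the expectation in the test further down: priority columns lead, the remainder sorts alphabetically:

func headerExample() {
	// Map iteration order is random, but the output is deterministic.
	h := constructCSVHeaderFromQueryResponse(map[string]any{
		"message": "hello", "level": "info", "timestamp": 1, "id": "a",
	})
	fmt.Println(h) // [timestamp id level message]
}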
@@ -370,12 +427,9 @@ func constructCSVRecordFromQueryResponse(data map[string]any, headerToIndexMappi
valueStr = v.String()
default:
jsonBytes, err := json.Marshal(v)
if err != nil {
valueStr = fmt.Sprintf("%v", v)
} else {
valueStr = string(jsonBytes)
}
// For all other complex types (maps, structs, etc.)
jsonBytes, _ := json.Marshal(v)
valueStr = string(jsonBytes)
}
record[index] = sanitizeForCSV(valueStr)
@@ -384,6 +438,26 @@ func constructCSVRecordFromQueryResponse(data map[string]any, headerToIndexMappi
return record
}
// getExportQueryColumns parses the "columns" query parameters and returns a slice of TelemetryFieldKey structs.
// Each column should be a valid telemetry field key in the format "context.field:type" or "context.field" or "field"
func getExportQueryColumns(queryParams url.Values) []telemetrytypes.TelemetryFieldKey {
columnParams := queryParams["columns"]
columns := make([]telemetrytypes.TelemetryFieldKey, 0, len(columnParams))
for _, columnStr := range columnParams {
// Skip empty strings
columnStr = strings.TrimSpace(columnStr)
if columnStr == "" {
continue
}
columns = append(columns, telemetrytypes.GetFieldKeyFromKeyText(columnStr))
}
return columns
}
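
For example, a column such as "attribute.user:string" parses into a key with a name, field context, and data type, as the tests below exercise:

func columnsExample() {
	cols := getExportQueryColumns(url.Values{
		"columns": {"timestamp", "attribute.user:string"},
	})
	// cols[0] has Name "timestamp" with no context or data type;
	// cols[1] has Name "user", FieldContext attribute, FieldDataType string.
	_ = cols
}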
func getsizeOfStringSlice(slice []string) uint64 {
var totalBytes uint64
for _, str := range slice {
@@ -391,3 +465,52 @@ func getsizeOfStringSlice(slice []string) uint64 {
}
return totalBytes
}
// getExportQueryOrderBy parses the "order_by" query parameters and returns a slice of OrderBy structs.
// Each "order_by" parameter should be in the format "column:direction"
// Each "column" should be a valid telemetry field key in the format "context.field:type" or "context.field" or "field"
func getExportQueryOrderBy(queryParams url.Values) ([]qbtypes.OrderBy, error) {
orderByParam := queryParams.Get("order_by")
orderByParam = strings.TrimSpace(orderByParam)
if orderByParam == "" {
return telemetrylogs.DefaultLogsV2SortingOrder, nil
}
parts := strings.Split(orderByParam, ":")
if len(parts) != 2 && len(parts) != 3 {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order_by format: %s, should be <column>:<direction>", orderByParam)
}
column := strings.Join(parts[:len(parts)-1], ":")
direction := parts[len(parts)-1]
orderDirection, ok := qbtypes.OrderDirectionMap[direction]
if !ok {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order_by direction: %s, should be one of %s, %s", direction, qbtypes.OrderDirectionAsc, qbtypes.OrderDirectionDesc)
}
orderByKey := telemetrytypes.GetFieldKeyFromKeyText(column)
orderBy := []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: orderByKey,
},
Direction: orderDirection,
},
}
// If we are ordering by the timestamp column, also order by the ID column
if orderByKey.Name == telemetrylogs.LogsV2TimestampColumn {
orderBy = append(orderBy, qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2IDColumn,
},
},
Direction: orderDirection,
})
}
return orderBy, nil
}
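
A worked example: ordering by the timestamp column implicitly appends the id column with the same direction, so "timestamp:asc" yields two OrderBy entries:

func orderByExample() ([]qbtypes.OrderBy, error) {
	// Yields timestamp:asc followed by id:asc, added automatically.
	return getExportQueryOrderBy(url.Values{"order_by": {"timestamp:asc"}})
}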

View File

@@ -2,85 +2,58 @@ package implrawdataexport
import (
"net/url"
"strconv"
"testing"
"github.com/SigNoz/signoz/pkg/http/binding"
"github.com/SigNoz/signoz/pkg/types/exporttypes"
"github.com/SigNoz/signoz/pkg/telemetrylogs"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/stretchr/testify/assert"
)
func TestExportRawDataQueryParams_BindingDefaults(t *testing.T) {
var params exporttypes.ExportRawDataQueryParams
err := binding.Query.BindQuery(url.Values{}, &params)
assert.NoError(t, err)
assert.Equal(t, "csv", params.Format)
assert.Equal(t, DefaultExportRowCountLimit, params.Limit)
}
func logQuery(limit int) qbtypes.QueryEnvelope {
return qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{Limit: limit},
}
}
func traceQuery(limit int) qbtypes.QueryEnvelope {
return qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{Limit: limit},
}
}
func traceOperatorQuery(limit int) qbtypes.QueryEnvelope {
return qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeTraceOperator,
Spec: qbtypes.QueryBuilderTraceOperator{Limit: limit, Expression: "A"},
}
}
func makeRequest(queries ...qbtypes.QueryEnvelope) qbtypes.QueryRangeRequest {
return qbtypes.QueryRangeRequest{
Start: 1000000000000,
End: 1000003600000,
RequestType: qbtypes.RequestTypeRaw,
CompositeQuery: qbtypes.CompositeQuery{Queries: queries},
}
}
func TestValidateSpecForExport(t *testing.T) {
func TestGetExportQuerySource(t *testing.T) {
tests := []struct {
name string
req qbtypes.QueryRangeRequest
expectedError bool
name string
queryParams url.Values
expectedSource string
expectedError bool
}{
{
name: "single log query",
req: makeRequest(logQuery(0)),
name: "default logs source",
queryParams: url.Values{},
expectedSource: "logs",
expectedError: false,
},
{
name: "single trace query",
req: makeRequest(traceQuery(0)),
name: "explicit logs source",
queryParams: url.Values{"source": {"logs"}},
expectedSource: "logs",
expectedError: false,
},
{
name: "trace operator alone",
req: makeRequest(traceOperatorQuery(0)),
name: "metrics source - not supported",
queryParams: url.Values{"source": {"metrics"}},
expectedSource: "metrics",
expectedError: true,
},
{
name: "multiple queries without trace operator",
req: makeRequest(logQuery(0), traceQuery(0)),
expectedError: true,
name: "traces source - not supported",
queryParams: url.Values{"source": {"traces"}},
expectedSource: "traces",
expectedError: true,
},
{
name: "unsupported query type",
req: makeRequest(qbtypes.QueryEnvelope{Type: qbtypes.QueryTypeBuilder, Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{}}),
expectedError: true,
name: "invalid source",
queryParams: url.Values{"source": {"invalid"}},
expectedSource: "",
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateSpecForExport(&tt.req)
source, err := getExportQuerySource(tt.queryParams)
assert.Equal(t, tt.expectedSource, source)
if tt.expectedError {
assert.Error(t, err)
} else {
@@ -90,69 +63,456 @@ func TestValidateSpecForExport(t *testing.T) {
}
}
func TestValidateAndApplyDefaultExportLimits(t *testing.T) {
func TestGetExportQueryFormat(t *testing.T) {
tests := []struct {
name string
queries []qbtypes.QueryEnvelope
expectedError bool
checkQueries func(t *testing.T, queries []qbtypes.QueryEnvelope)
name string
queryParams url.Values
expectedFormat string
expectedError bool
}{
{
name: "single log query, zero limit gets default",
queries: makeRequest(logQuery(0)).CompositeQuery.Queries,
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
},
name: "default csv format",
queryParams: url.Values{},
expectedFormat: "csv",
expectedError: false,
},
{
name: "single log query, valid limit kept",
queries: makeRequest(logQuery(1000)).CompositeQuery.Queries,
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
assert.Equal(t, 1000, q[0].GetLimit())
},
name: "explicit csv format",
queryParams: url.Values{"format": {"csv"}},
expectedFormat: "csv",
expectedError: false,
},
{
name: "single log query, max limit kept",
queries: makeRequest(logQuery(MaxExportRowCountLimit)).CompositeQuery.Queries,
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
assert.Equal(t, MaxExportRowCountLimit, q[0].GetLimit())
},
name: "jsonl format",
queryParams: url.Values{"format": {"jsonl"}},
expectedFormat: "jsonl",
expectedError: false,
},
{
name: "single log query, limit exceeds max",
queries: makeRequest(logQuery(MaxExportRowCountLimit + 1)).CompositeQuery.Queries,
name: "invalid format",
queryParams: url.Values{"format": {"xml"}},
expectedFormat: "",
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
format, err := getExportQueryFormat(tt.queryParams)
assert.Equal(t, tt.expectedFormat, format)
if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestGetExportQueryLimit(t *testing.T) {
tests := []struct {
name string
queryParams url.Values
expectedLimit int
expectedError bool
}{
{
name: "default limit",
queryParams: url.Values{},
expectedLimit: DefaultExportRowCountLimit,
expectedError: false,
},
{
name: "valid limit",
queryParams: url.Values{"limit": {"5000"}},
expectedLimit: 5000,
expectedError: false,
},
{
name: "maximum limit",
queryParams: url.Values{"limit": {strconv.Itoa(MaxExportRowCountLimit)}},
expectedLimit: MaxExportRowCountLimit,
expectedError: false,
},
{
name: "limit exceeds maximum",
queryParams: url.Values{"limit": {"100000"}},
expectedLimit: 0,
expectedError: true,
},
{
name: "single log query, negative limit",
queries: makeRequest(logQuery(-1)).CompositeQuery.Queries,
name: "invalid limit format",
queryParams: url.Values{"limit": {"invalid"}},
expectedLimit: 0,
expectedError: true,
},
{
name: "single trace query, zero limit gets default",
queries: makeRequest(traceQuery(0)).CompositeQuery.Queries,
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
name: "negative limit",
queryParams: url.Values{"limit": {"-100"}},
expectedLimit: 0,
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
limit, err := getExportQueryLimit(tt.queryParams)
assert.Equal(t, tt.expectedLimit, limit)
if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestGetExportQueryTimeRange(t *testing.T) {
tests := []struct {
name string
queryParams url.Values
expectedStartTime uint64
expectedEndTime uint64
expectedError bool
}{
{
name: "valid time range",
queryParams: url.Values{
"start": {"1640995200"},
"end": {"1641081600"},
},
expectedStartTime: 1640995200,
expectedEndTime: 1641081600,
expectedError: false,
},
{
name: "missing start time",
queryParams: url.Values{"end": {"1641081600"}},
expectedError: true,
},
{
name: "missing end time",
queryParams: url.Values{"start": {"1640995200"}},
expectedError: true,
},
{
name: "missing both times",
queryParams: url.Values{},
expectedError: true,
},
{
name: "invalid start time format",
queryParams: url.Values{
"start": {"invalid"},
"end": {"1641081600"},
},
expectedError: true,
},
{
name: "invalid end time format",
queryParams: url.Values{
"start": {"1640995200"},
"end": {"invalid"},
},
expectedError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
startTime, endTime, err := getExportQueryTimeRange(tt.queryParams)
if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Equal(t, tt.expectedStartTime, startTime)
assert.Equal(t, tt.expectedEndTime, endTime)
}
})
}
}
func TestGetExportQueryColumns(t *testing.T) {
tests := []struct {
name string
queryParams url.Values
expectedColumns []telemetrytypes.TelemetryFieldKey
}{
{
name: "no columns specified",
queryParams: url.Values{},
expectedColumns: []telemetrytypes.TelemetryFieldKey{},
},
{
name: "single column",
queryParams: url.Values{
"columns": {"timestamp"},
},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
},
},
{
name: "trace operator alone, zero limit gets default",
queries: makeRequest(traceOperatorQuery(0)).CompositeQuery.Queries,
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
name: "multiple columns",
queryParams: url.Values{
"columns": {"timestamp", "message", "level"},
},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "message"},
{Name: "level"},
},
},
{
name: "empty column name (should be skipped)",
queryParams: url.Values{
"columns": {"timestamp", "", "level"},
},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "level"},
},
},
{
name: "whitespace column name (should be skipped)",
queryParams: url.Values{
"columns": {"timestamp", " ", "level"},
},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "level"},
},
},
{
name: "valid column name with data type",
queryParams: url.Values{
"columns": {"timestamp", "attribute.user:string", "level"},
},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "user", FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
{Name: "level"},
},
},
{
name: "valid column name with dot notation",
queryParams: url.Values{
"columns": {"timestamp", "attribute.user.string", "level"},
},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "user.string", FieldContext: telemetrytypes.FieldContextAttribute},
{Name: "level"},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateAndApplyDefaultExportLimits(tt.queries)
columns := getExportQueryColumns(tt.queryParams)
assert.Equal(t, len(tt.expectedColumns), len(columns))
for i, expectedCol := range tt.expectedColumns {
assert.Equal(t, expectedCol, columns[i])
}
})
}
}
func TestGetExportQueryOrderBy(t *testing.T) {
tests := []struct {
name string
queryParams url.Values
expectedOrder []qbtypes.OrderBy
expectedError bool
}{
{
name: "no order specified",
queryParams: url.Values{},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2TimestampColumn,
},
},
},
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2IDColumn,
},
},
},
},
expectedError: false,
},
{
name: "single order error, direction not specified",
queryParams: url.Values{
"order_by": {"timestamp"},
},
expectedOrder: nil,
expectedError: true,
},
{
name: "single order no error",
queryParams: url.Values{
"order_by": {"timestamp:asc"},
},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2TimestampColumn,
},
},
},
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2IDColumn,
},
},
},
},
expectedError: false,
},
{
name: "multiple orders",
queryParams: url.Values{
"order_by": {"timestamp:asc", "body:desc", "id:asc"},
},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2TimestampColumn,
},
},
},
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2IDColumn,
},
},
},
},
expectedError: false,
},
{
name: "empty order name (should be skipped)",
queryParams: url.Values{
"order_by": {"timestamp:asc", "", "id:asc"},
},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2TimestampColumn,
},
},
},
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2IDColumn,
},
},
},
},
expectedError: false,
},
{
name: "whitespace order name (should be skipped)",
queryParams: url.Values{
"order_by": {"timestamp:asc", " ", "id:asc"},
},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2TimestampColumn,
},
},
},
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: telemetrylogs.LogsV2IDColumn,
},
},
},
},
expectedError: false,
},
{
name: "invalid order name (should error out)",
queryParams: url.Values{
"order_by": {"attributes.user:", "id:asc"},
},
expectedOrder: nil,
expectedError: true,
},
{
name: "valid order name (should be included)",
queryParams: url.Values{
"order_by": {"attribute.user:string:desc", "id:asc"},
},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "user",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
},
},
expectedError: false,
},
{
name: "valid order name (should be included)",
queryParams: url.Values{
"order_by": {"attribute.user.string:desc", "id:asc"},
},
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "user.string",
FieldContext: telemetrytypes.FieldContextAttribute,
},
},
},
},
expectedError: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
order, err := getExportQueryOrderBy(tt.queryParams)
if tt.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
if tt.checkQueries != nil {
tt.checkQueries(t, tt.queries)
assert.Equal(t, len(tt.expectedOrder), len(order))
for i, expectedOrd := range tt.expectedOrder {
assert.Equal(t, expectedOrd, order[i])
}
}
})
@@ -169,8 +529,13 @@ func TestConstructCSVHeaderFromQueryResponse(t *testing.T) {
header := constructCSVHeaderFromQueryResponse(data)
// Priority columns come first in order, then the rest alphabetically.
assert.Equal(t, []string{"timestamp", "id", "level", "message"}, header)
// Since map iteration order is not guaranteed, check that all expected keys are present
expectedKeys := []string{"timestamp", "message", "level", "id"}
assert.Equal(t, len(expectedKeys), len(header))
for _, key := range expectedKeys {
assert.Contains(t, header, key)
}
}
func TestConstructCSVRecordFromQueryResponse(t *testing.T) {

View File

@@ -28,18 +28,8 @@ func (m *Module) ExportRawData(ctx context.Context, orgID valuer.UUID, rangeRequ
instrumentationtypes.CodeFunctionName: "ExportRawData",
})
traceOperatorQueryIndex := rangeRequest.TraceOperatorQueryIndex()
queries := rangeRequest.CompositeQuery.Queries
// If the trace operator query is present, mark the queries other than trace operator as disabled
if traceOperatorQueryIndex > -1 {
for idx := range len(queries) {
if idx != traceOperatorQueryIndex {
queries[idx].SetDisabled(true)
}
}
}
spec := rangeRequest.CompositeQuery.Queries[0].Spec.(qbtypes.QueryBuilderQuery[qbtypes.LogAggregation])
rowCountLimit := spec.Limit
rowChan := make(chan *qbtypes.RawRow, 1)
errChan := make(chan error, 1)
@@ -53,62 +43,52 @@ func (m *Module) ExportRawData(ctx context.Context, orgID valuer.UUID, rangeRequ
defer close(errChan)
defer close(rowChan)
if traceOperatorQueryIndex > -1 {
// If the trace operator query is present, we need to export the data for the trace operator query only
exportRawDataForSingleQuery(m.querier, contextWithTimeout, orgID, rangeRequest, rowChan, errChan, doneChan, traceOperatorQueryIndex)
} else {
// If the trace operator query is not present, we need to export the data for the first query only
exportRawDataForSingleQuery(m.querier, contextWithTimeout, orgID, rangeRequest, rowChan, errChan, doneChan, 0)
rowCount := 0
for rowCount < rowCountLimit {
spec.Limit = min(ChunkSize, rowCountLimit-rowCount)
spec.Offset = rowCount
rangeRequest.CompositeQuery.Queries[0].Spec = spec
response, err := m.querier.QueryRange(contextWithTimeout, orgID, rangeRequest)
if err != nil {
errChan <- err
return
}
newRowsCount := 0
for _, result := range response.Data.Results {
resultData, ok := result.(*qbtypes.RawData)
if !ok {
errChan <- errors.NewInternalf(errors.CodeInternal, "expected RawData, got %T", result)
return
}
newRowsCount += len(resultData.Rows)
for _, row := range resultData.Rows {
select {
case rowChan <- row:
case <-doneChan:
return
case <-ctx.Done():
errChan <- ctx.Err()
return
}
}
}
// Break if we did not receive any new rows
if newRowsCount == 0 {
return
}
rowCount += newRowsCount
}
}()
return rowChan, errChan
}
func exportRawDataForSingleQuery(querier querier.Querier, ctx context.Context, orgID valuer.UUID, rangeRequest *qbtypes.QueryRangeRequest, rowChan chan *qbtypes.RawRow, errChan chan error, doneChan chan any, queryIndex int) {
queries := rangeRequest.CompositeQuery.Queries
rowCountLimit := queries[queryIndex].GetLimit()
rowCount := 0
for rowCount < rowCountLimit {
chunkSize := min(ChunkSize, rowCountLimit-rowCount)
queries[queryIndex].SetLimit(chunkSize)
queries[queryIndex].SetOffset(rowCount)
response, err := querier.QueryRange(ctx, orgID, rangeRequest)
if err != nil {
errChan <- err
return
}
newRowsCount := 0
for _, result := range response.Data.Results {
resultData, ok := result.(*qbtypes.RawData)
if !ok {
errChan <- errors.NewInternalf(errors.CodeInternal, "expected RawData, got %T", result)
return
}
newRowsCount += len(resultData.Rows)
for _, row := range resultData.Rows {
select {
case rowChan <- row:
case <-doneChan:
return
case <-ctx.Done():
errChan <- ctx.Err()
return
}
}
}
rowCount += newRowsCount
// Stop if we received fewer rows than requested — no more data available
if newRowsCount < chunkSize {
return
}
}
}
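
The chunking arithmetic is easiest to see with concrete numbers: with a chunk size of 1000 and a row limit of 2500, the loop issues limits of 1000, 1000, and 500 at offsets 0, 1000, and 2000, stopping early when a chunk comes back short. A standalone sketch of that schedule (the chunk size value here is an assumption):

func chunkScheduleExample() {
	const chunkSize, rowCountLimit = 1000, 2500 // chunkSize is illustrative
	for rowCount := 0; rowCount < rowCountLimit; {
		size := min(chunkSize, rowCountLimit-rowCount)
		fmt.Printf("limit=%d offset=%d\n", size, rowCount)
		rowCount += size // the real loop advances by rows actually received
	}
	// Prints: limit=1000 offset=0, limit=1000 offset=1000, limit=500 offset=2000
}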

View File

@@ -574,6 +574,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
aH.LicensingAPI.Activate(rw, req)
})).Methods(http.MethodGet)
// Export
router.HandleFunc("/api/v1/export_raw_data", am.ViewAccess(aH.Signoz.Handlers.RawDataExport.ExportRawData)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/span_percentile", am.ViewAccess(aH.Signoz.Handlers.SpanPercentile.GetSpanPercentileDetails)).Methods(http.MethodPost)
// Query Filter Analyzer API, used to extract metric names and grouping columns from a query

View File

@@ -13,6 +13,7 @@ import (
"github.com/SigNoz/signoz/pkg/factory/factorytest"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
@@ -50,7 +51,7 @@ func TestNewHandlers(t *testing.T) {
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, implcloudintegration.NewModule())
querierHandler := querier.NewHandler(providerSettings, nil, nil)
handlers := NewHandlers(modules, providerSettings, nil, querierHandler, nil, nil, nil, nil, nil, nil, nil)

View File

@@ -12,6 +12,7 @@ import (
"github.com/SigNoz/signoz/pkg/modules/apdex/implapdex"
"github.com/SigNoz/signoz/pkg/modules/authdomain"
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
@@ -51,24 +52,25 @@ import (
)
type Modules struct {
OrgGetter organization.Getter
OrgSetter organization.Setter
Preference preference.Module
User user.Module
UserGetter user.Getter
SavedView savedview.Module
Apdex apdex.Module
Dashboard dashboard.Module
QuickFilter quickfilter.Module
TraceFunnel tracefunnel.Module
RawDataExport rawdataexport.Module
AuthDomain authdomain.Module
Session session.Module
Services services.Module
SpanPercentile spanpercentile.Module
MetricsExplorer metricsexplorer.Module
Promote promote.Module
ServiceAccount serviceaccount.Module
OrgGetter organization.Getter
OrgSetter organization.Setter
Preference preference.Module
User user.Module
UserGetter user.Getter
SavedView savedview.Module
Apdex apdex.Module
Dashboard dashboard.Module
QuickFilter quickfilter.Module
TraceFunnel tracefunnel.Module
RawDataExport rawdataexport.Module
AuthDomain authdomain.Module
Session session.Module
Services services.Module
SpanPercentile spanpercentile.Module
MetricsExplorer metricsexplorer.Module
Promote promote.Module
ServiceAccount serviceaccount.Module
CloudIntegration cloudintegration.Module
}
func NewModules(
@@ -89,6 +91,7 @@ func NewModules(
config Config,
dashboard dashboard.Module,
userGetter user.Getter,
cloudIntegration cloudintegration.Module,
) Modules {
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
orgSetter := implorganization.NewSetter(implorganization.NewStore(sqlstore), alertmanager, quickfilter)
@@ -96,23 +99,24 @@ func NewModules(
ruleStore := sqlrulestore.NewRuleStore(sqlstore, queryParser, providerSettings)
return Modules{
OrgGetter: orgGetter,
OrgSetter: orgSetter,
Preference: implpreference.NewModule(implpreference.NewStore(sqlstore), preferencetypes.NewAvailablePreference()),
SavedView: implsavedview.NewModule(sqlstore),
Apdex: implapdex.NewModule(sqlstore),
Dashboard: dashboard,
User: user,
UserGetter: userGetter,
QuickFilter: quickfilter,
TraceFunnel: impltracefunnel.NewModule(impltracefunnel.NewStore(sqlstore)),
RawDataExport: implrawdataexport.NewModule(querier),
AuthDomain: implauthdomain.NewModule(implauthdomain.NewStore(sqlstore), authNs),
Session: implsession.NewModule(providerSettings, authNs, user, userGetter, implauthdomain.NewModule(implauthdomain.NewStore(sqlstore), authNs), tokenizer, orgGetter),
SpanPercentile: implspanpercentile.NewModule(querier, providerSettings),
Services: implservices.NewModule(querier, telemetryStore),
MetricsExplorer: implmetricsexplorer.NewModule(telemetryStore, telemetryMetadataStore, cache, ruleStore, dashboard, providerSettings, config.MetricsExplorer),
Promote: implpromote.NewModule(telemetryMetadataStore, telemetryStore),
ServiceAccount: implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), authz, emailing, providerSettings),
OrgGetter: orgGetter,
OrgSetter: orgSetter,
Preference: implpreference.NewModule(implpreference.NewStore(sqlstore), preferencetypes.NewAvailablePreference()),
SavedView: implsavedview.NewModule(sqlstore),
Apdex: implapdex.NewModule(sqlstore),
Dashboard: dashboard,
User: user,
UserGetter: userGetter,
QuickFilter: quickfilter,
TraceFunnel: impltracefunnel.NewModule(impltracefunnel.NewStore(sqlstore)),
RawDataExport: implrawdataexport.NewModule(querier),
AuthDomain: implauthdomain.NewModule(implauthdomain.NewStore(sqlstore), authNs),
Session: implsession.NewModule(providerSettings, authNs, user, userGetter, implauthdomain.NewModule(implauthdomain.NewStore(sqlstore), authNs), tokenizer, orgGetter),
SpanPercentile: implspanpercentile.NewModule(querier, providerSettings),
Services: implservices.NewModule(querier, telemetryStore),
MetricsExplorer: implmetricsexplorer.NewModule(telemetryStore, telemetryMetadataStore, cache, ruleStore, dashboard, providerSettings, config.MetricsExplorer),
Promote: implpromote.NewModule(telemetryMetadataStore, telemetryStore),
ServiceAccount: implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), authz, emailing, providerSettings),
CloudIntegration: cloudIntegration,
}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/SigNoz/signoz/pkg/factory/factorytest"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
@@ -49,7 +50,7 @@ func TestNewModules(t *testing.T) {
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, implcloudintegration.NewModule())
reflectVal := reflect.ValueOf(modules)
for i := 0; i < reflectVal.NumField(); i++ {

View File

@@ -22,7 +22,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/preference"
"github.com/SigNoz/signoz/pkg/modules/promote"
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/session"
"github.com/SigNoz/signoz/pkg/modules/user"
@@ -59,7 +58,6 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
struct{ gateway.Handler }{},
struct{ fields.Handler }{},
struct{ authz.Handler }{},
struct{ rawdataexport.Handler }{},
struct{ zeus.Handler }{},
struct{ querier.Handler }{},
struct{ serviceaccount.Handler }{},

View File

@@ -256,7 +256,6 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
handlers.GatewayHandler,
handlers.Fields,
handlers.AuthzHandler,
handlers.RawDataExport,
handlers.ZeusHandler,
handlers.QuerierHandler,
handlers.ServiceAccountHandler,

View File

@@ -18,6 +18,7 @@ import (
"github.com/SigNoz/signoz/pkg/gateway"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
@@ -43,6 +44,7 @@ import (
"github.com/SigNoz/signoz/pkg/version"
"github.com/SigNoz/signoz/pkg/zeus"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/web"
)
@@ -91,6 +93,7 @@ func New(
dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing) dashboard.Module,
gatewayProviderFactory func(licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config],
querierHandlerCallback func(factory.ProviderSettings, querier.Querier, analytics.Analytics) querier.Handler,
cloudIntegrationModuleCallback func(sqlstore.SQLStore, licensing.Licensing, zeus.Zeus, gateway.Gateway, global.Config) cloudintegration.Module,
) (*SigNoz, error) {
// Initialize instrumentation
instrumentation, err := instrumentation.New(ctx, config.Instrumentation, version.Info, "signoz")
@@ -387,8 +390,11 @@ func New(
return nil, err
}
// Initialize cloudintegration module via callback
cloudIntegrationModule := cloudIntegrationModuleCallback(sqlstore, licensing, zeus, gateway, config.Global)
// Initialize all modules
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, cloudIntegrationModule)
userService := impluser.NewService(providerSettings, impluser.NewStore(sqlstore, providerSettings), modules.User, orgGetter, authz, config.User.Root)

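A minimal sketch of a cloudIntegrationModuleCallback that a community binary could pass to New, mirroring the zero-argument implcloudintegration.NewModule() used in the test earlier (the enterprise callback would presumably make use of the licensing, zeus, and gateway dependencies; imports assumed from the block at the top of this file):

func newCommunityCloudIntegrationModule(
	_ sqlstore.SQLStore,
	_ licensing.Licensing,
	_ zeus.Zeus,
	_ gateway.Gateway,
	_ global.Config,
) cloudintegration.Module {
	// The community wiring ignores the enterprise-only dependencies.
	return implcloudintegration.NewModule()
}
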
View File

@@ -0,0 +1,49 @@
package cloudintegrationtypes
import (
"time"
"github.com/SigNoz/signoz/pkg/valuer"
)
type (
ConnectedAccounts struct {
Accounts []*Account `json:"accounts"`
}
GettableConnectedAccounts = ConnectedAccounts
UpdateAccountConfigRequest struct {
AWS *AWSAccountConfig `json:"aws"`
}
UpdatableAccountConfig = UpdateAccountConfigRequest
)
type (
Account struct {
Id string `json:"id"`
ProviderAccountId *string `json:"providerAccountID,omitempty"`
Provider CloudProviderType `json:"provider"`
RemovedAt *time.Time `json:"removedAt,omitempty"`
AgentReport *AgentReport `json:"agentReport,omitempty"`
OrgID valuer.UUID `json:"orgID"`
Config *AccountConfig `json:"accountConfig,omitempty"`
}
GettableAccount = Account
)
// AgentReport represents heartbeats sent by the agent.
type AgentReport struct {
TimestampMillis int64 `json:"timestampMillis"`
Data map[string]any `json:"data"`
}
type AccountConfig struct {
AWS *AWSAccountConfig `json:"aws,omitempty"`
}
type AWSAccountConfig struct {
Regions []string `json:"regions"`
}

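The pointer-plus-omitempty fields above keep optional data out of the wire format until it exists. A self-contained sketch with a trimmed, illustrative stand-in for Account:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in mirroring Account's json tags; illustrative only.
type account struct {
	Id                string  `json:"id"`
	ProviderAccountId *string `json:"providerAccountID,omitempty"`
	Provider          string  `json:"provider"`
}

func main() {
	// ProviderAccountId is omitted from the payload while nil.
	out, _ := json.Marshal(account{Id: "acc-1", Provider: "aws"})
	fmt.Println(string(out)) // {"id":"acc-1","provider":"aws"}

	id := "123456789012"
	out, _ = json.Marshal(account{Id: "acc-1", ProviderAccountId: &id, Provider: "aws"})
	fmt.Println(string(out)) // now includes "providerAccountID":"123456789012"
}
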
View File

@@ -0,0 +1,82 @@
package cloudintegrationtypes
import (
"database/sql/driver"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/uptrace/bun"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
ErrCodeCloudIntegrationNotFound = errors.MustNewCode("cloud_integration_not_found")
)
// StorableCloudIntegration represents a cloud integration stored in the database.
// This is also referred to as an "Account" in the context of cloud integrations.
type StorableCloudIntegration struct {
bun.BaseModel `bun:"table:cloud_integration"`
types.Identifiable
types.TimeAuditable
Provider CloudProviderType `json:"provider" bun:"provider,type:text"`
// Config is provider-specific data in JSON string format
Config string `json:"config" bun:"config,type:text"`
AccountID *string `json:"account_id" bun:"account_id,type:text"`
LastAgentReport *StorableAgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
RemovedAt *time.Time `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
OrgID valuer.UUID `bun:"org_id,type:text"`
}
// StorableAgentReport represents the last heartbeat and arbitrary data sent by the agent
// As of now there is no use case for the Data field, but it is kept for backward compatibility with the older structure.
type StorableAgentReport struct {
TimestampMillis int64 `json:"timestamp_millis"`
Data map[string]any `json:"data"`
}
// StorableCloudIntegrationService stores the service config for a cloud integration; this config is cloud provider specific.
type StorableCloudIntegrationService struct {
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
types.Identifiable
types.TimeAuditable
Type valuer.String `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
// Config is the cloud provider's service-specific data in JSON string format
Config string `bun:"config,type:text"`
CloudIntegrationID valuer.UUID `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integration(id),on_delete:cascade"`
}
// Scan scans value from DB
func (r *StorableAgentReport) Scan(src any) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, r)
}
// Value creates value to be stored in DB
func (r *StorableAgentReport) Value() (driver.Value, error) {
if r == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
}
serialized, err := json.Marshal(r)
if err != nil {
return nil, errors.WrapInternalf(
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
)
}
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytes
return string(serialized), nil
}

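The Scan/Value pair above serializes the report as JSON text for storage. A self-contained sketch of the same database/sql round trip with a minimal local type:

package main

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

type report struct {
	TimestampMillis int64          `json:"timestamp_millis"`
	Data            map[string]any `json:"data"`
}

// Value mirrors StorableAgentReport.Value: serialize to a JSON string so
// the database stores text rather than bytes.
func (r *report) Value() (driver.Value, error) {
	b, err := json.Marshal(r)
	if err != nil {
		return nil, err
	}
	return string(b), nil
}

// Scan mirrors StorableAgentReport.Scan: accept string or []byte from the driver.
func (r *report) Scan(src any) error {
	switch v := src.(type) {
	case []byte:
		return json.Unmarshal(v, r)
	case string:
		return json.Unmarshal([]byte(v), r)
	default:
		return fmt.Errorf("unsupported scan type %T", src)
	}
}

func main() {
	in := &report{TimestampMillis: 1700000000000}
	v, _ := in.Value()
	var out report
	_ = out.Scan(v)
	fmt.Printf("%+v\n", out) // {TimestampMillis:1700000000000 Data:map[]}
}
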
View File

@@ -0,0 +1,41 @@
package cloudintegrationtypes
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
)
// CloudProviderType represents a supported cloud provider
type CloudProviderType struct{ valuer.String }
var (
// cloud providers
CloudProviderTypeAWS = CloudProviderType{valuer.NewString("aws")}
CloudProviderTypeAzure = CloudProviderType{valuer.NewString("azure")}
// errors
ErrCodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")
AWSIntegrationUserEmail = valuer.MustNewEmail("aws-integration@signoz.io")
AzureIntegrationUserEmail = valuer.MustNewEmail("azure-integration@signoz.io")
)
// CloudIntegrationUserEmails is the list of valid emails for Cloud One Click integrations.
// It is used for validation and restrictions in different contexts across the codebase.
var CloudIntegrationUserEmails = []valuer.Email{
AWSIntegrationUserEmail,
AzureIntegrationUserEmail,
}
// NewCloudProvider returns a new CloudProviderType from a string.
// It validates the input and returns an error if the input is not a valid cloud provider.
func NewCloudProvider(provider string) (CloudProviderType, error) {
switch provider {
case CloudProviderTypeAWS.StringValue():
return CloudProviderTypeAWS, nil
case CloudProviderTypeAzure.StringValue():
return CloudProviderTypeAzure, nil
default:
return CloudProviderType{}, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
}
}

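Hedged usage sketch (assuming the package is imported as cloudintegrationtypes; the error carries the invalid_cloud_provider code defined above):

provider, err := cloudintegrationtypes.NewCloudProvider("gcp")
if err != nil {
	// "gcp" is not in the supported set, so err is an invalid-input error:
	// "invalid cloud provider: gcp"
	return err
}
_ = provider // CloudProviderTypeAWS or CloudProviderTypeAzure for valid inputs
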
View File

@@ -0,0 +1,103 @@
package cloudintegrationtypes
import "github.com/SigNoz/signoz/pkg/types/integrationtypes"
// Request types for creating a connection artifact.
type (
PostableConnectionArtifact = ConnectionArtifactRequest
ConnectionArtifactRequest struct {
Aws *AWSConnectionArtifactRequest `json:"aws"`
}
AWSConnectionArtifactRequest struct {
DeploymentRegion string `json:"deploymentRegion"`
Regions []string `json:"regions"`
}
)
type (
// SignozCredentials is used to configure agents to connect with SigNoz
SignozCredentials struct {
SigNozAPIUrl string
SigNozAPIKey string // PAT
IngestionUrl string
IngestionKey string
}
ConnectionArtifact struct {
Aws *AWSConnectionArtifact `json:"aws"`
}
AWSConnectionArtifact struct {
ConnectionUrl string `json:"connectionURL"`
}
GettableConnectionArtifact = ConnectionArtifact
)
type (
AccountStatus struct {
Id string `json:"id"`
ProviderAccountId *string `json:"providerAccountID,omitempty"`
Status integrationtypes.AccountStatus `json:"status"`
}
GettableAccountStatus = AccountStatus
)
type (
AgentCheckInRequest struct {
// older backward-compatible fields are mapped to the new fields below
// CloudIntegrationId string `json:"cloudIntegrationId"`
// AccountId string `json:"accountId"`
// New fields
ProviderAccountId string `json:"providerAccountId"`
CloudAccountId string `json:"cloudAccountId"`
Data map[string]any `json:"data,omitempty"`
}
PostableAgentCheckInRequest struct {
AgentCheckInRequest
// the following are backward-compatible fields for agents running older versions
CloudIntegrationId string `json:"cloud_integration_id"`
CloudAccountId string `json:"cloud_account_id"`
}
GettableAgentCheckInResponse struct {
AgentCheckInResponse
CloudIntegrationId string `json:"cloud_integration_id"`
AccountId string `json:"account_id"`
}
AgentCheckInResponse struct {
// Older fields for backward compatibility are mapped to new fields below
// CloudIntegrationId string `json:"cloud_integration_id"`
// AccountId string `json:"account_id"`
// New fields
ProviderAccountId string `json:"providerAccountId"`
CloudAccountId string `json:"cloudAccountId"`
// IntegrationConfig carries the integration data an agent requires
// to start collecting telemetry data.
// JSON keys are kept snake_case for backward compatibility.
IntegrationConfig *IntegrationConfig `json:"integration_config,omitempty"`
}
IntegrationConfig struct {
EnabledRegions []string `json:"enabledRegions"` // backward compatible
Telemetry *AWSCollectionStrategy `json:"telemetry,omitempty"` // backward compatible
// new fields
AWS *AWSIntegrationConfig `json:"aws,omitempty"`
}
AWSIntegrationConfig struct {
EnabledRegions []string `json:"enabledRegions"`
Telemetry *AWSCollectionStrategy `json:"telemetry,omitempty"`
}
)

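The embedded struct plus legacy keys lets one payload serve both agent generations. A self-contained demonstration with trimmed, illustrative stand-ins (the legacy fields are renamed Legacy* here to keep the sketch unambiguous):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-ins mirroring the check-in request shapes above; illustrative only.
type AgentCheckInRequest struct {
	ProviderAccountId string `json:"providerAccountId"`
	CloudAccountId    string `json:"cloudAccountId"`
}

type PostableAgentCheckInRequest struct {
	AgentCheckInRequest
	// Legacy snake_case keys still sent by older agents.
	LegacyCloudIntegrationId string `json:"cloud_integration_id"`
	LegacyCloudAccountId     string `json:"cloud_account_id"`
}

func main() {
	// An older agent sends only the snake_case keys...
	old := []byte(`{"cloud_integration_id":"ci-1","cloud_account_id":"123456789012"}`)
	var req PostableAgentCheckInRequest
	_ = json.Unmarshal(old, &req)

	// ...so a handler can backfill the new fields before processing.
	if req.CloudAccountId == "" {
		req.CloudAccountId = req.LegacyCloudAccountId
	}
	fmt.Println(req.CloudAccountId) // 123456789012
}
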
View File

@@ -0,0 +1,103 @@
package cloudintegrationtypes
import (
"github.com/SigNoz/signoz/pkg/errors"
)
var (
CodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
CodeMismatchCloudProvider = errors.MustNewCode("cloud_provider_mismatch")
)
// List of all valid cloud regions on Amazon Web Services
var ValidAWSRegions = map[string]struct{}{
"af-south-1": {}, // Africa (Cape Town).
"ap-east-1": {}, // Asia Pacific (Hong Kong).
"ap-northeast-1": {}, // Asia Pacific (Tokyo).
"ap-northeast-2": {}, // Asia Pacific (Seoul).
"ap-northeast-3": {}, // Asia Pacific (Osaka).
"ap-south-1": {}, // Asia Pacific (Mumbai).
"ap-south-2": {}, // Asia Pacific (Hyderabad).
"ap-southeast-1": {}, // Asia Pacific (Singapore).
"ap-southeast-2": {}, // Asia Pacific (Sydney).
"ap-southeast-3": {}, // Asia Pacific (Jakarta).
"ap-southeast-4": {}, // Asia Pacific (Melbourne).
"ca-central-1": {}, // Canada (Central).
"ca-west-1": {}, // Canada West (Calgary).
"eu-central-1": {}, // Europe (Frankfurt).
"eu-central-2": {}, // Europe (Zurich).
"eu-north-1": {}, // Europe (Stockholm).
"eu-south-1": {}, // Europe (Milan).
"eu-south-2": {}, // Europe (Spain).
"eu-west-1": {}, // Europe (Ireland).
"eu-west-2": {}, // Europe (London).
"eu-west-3": {}, // Europe (Paris).
"il-central-1": {}, // Israel (Tel Aviv).
"me-central-1": {}, // Middle East (UAE).
"me-south-1": {}, // Middle East (Bahrain).
"sa-east-1": {}, // South America (Sao Paulo).
"us-east-1": {}, // US East (N. Virginia).
"us-east-2": {}, // US East (Ohio).
"us-west-1": {}, // US West (N. California).
"us-west-2": {}, // US West (Oregon).
}
// List of all valid cloud regions for Microsoft Azure
var ValidAzureRegions = map[string]struct{}{
"australiacentral": {}, // Australia Central
"australiacentral2": {}, // Australia Central 2
"australiaeast": {}, // Australia East
"australiasoutheast": {}, // Australia Southeast
"austriaeast": {}, // Austria East
"belgiumcentral": {}, // Belgium Central
"brazilsouth": {}, // Brazil South
"brazilsoutheast": {}, // Brazil Southeast
"canadacentral": {}, // Canada Central
"canadaeast": {}, // Canada East
"centralindia": {}, // Central India
"centralus": {}, // Central US
"chilecentral": {}, // Chile Central
"denmarkeast": {}, // Denmark East
"eastasia": {}, // East Asia
"eastus": {}, // East US
"eastus2": {}, // East US 2
"francecentral": {}, // France Central
"francesouth": {}, // France South
"germanynorth": {}, // Germany North
"germanywestcentral": {}, // Germany West Central
"indonesiacentral": {}, // Indonesia Central
"israelcentral": {}, // Israel Central
"italynorth": {}, // Italy North
"japaneast": {}, // Japan East
"japanwest": {}, // Japan West
"koreacentral": {}, // Korea Central
"koreasouth": {}, // Korea South
"malaysiawest": {}, // Malaysia West
"mexicocentral": {}, // Mexico Central
"newzealandnorth": {}, // New Zealand North
"northcentralus": {}, // North Central US
"northeurope": {}, // North Europe
"norwayeast": {}, // Norway East
"norwaywest": {}, // Norway West
"polandcentral": {}, // Poland Central
"qatarcentral": {}, // Qatar Central
"southafricanorth": {}, // South Africa North
"southafricawest": {}, // South Africa West
"southcentralus": {}, // South Central US
"southindia": {}, // South India
"southeastasia": {}, // Southeast Asia
"spaincentral": {}, // Spain Central
"swedencentral": {}, // Sweden Central
"switzerlandnorth": {}, // Switzerland North
"switzerlandwest": {}, // Switzerland West
"uaecentral": {}, // UAE Central
"uaenorth": {}, // UAE North
"uksouth": {}, // UK South
"ukwest": {}, // UK West
"westcentralus": {}, // West Central US
"westeurope": {}, // West Europe
"westindia": {}, // West India
"westus": {}, // West US
"westus2": {}, // West US 2
"westus3": {}, // West US 3
}

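A hypothetical validator showing how these region sets and the error codes above would be used together (not part of this diff; the CloudProviderType comparison assumes valuer.String is comparable):

func validateRegions(provider CloudProviderType, regions []string) error {
	valid := ValidAWSRegions
	if provider == CloudProviderTypeAzure {
		valid = ValidAzureRegions
	}
	for _, region := range regions {
		if _, ok := valid[region]; !ok {
			return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
		}
	}
	return nil
}
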
View File

@@ -0,0 +1,203 @@
package cloudintegrationtypes
import (
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var S3Sync = valuer.NewString("s3sync")
type (
ServicesSummary struct {
Services []*ServiceSummary `json:"services"`
}
ServiceSummary struct {
ServiceDefinitionMetadata
ServiceConfig *ServiceConfig `json:"serviceConfig"`
}
GettableServiceSummary = ServiceSummary
GettableServicesSummary = ServicesSummary
Service struct {
ServiceDefinition
ServiceConfig *ServiceConfig `json:"serviceConfig"`
}
GettableService = Service
UpdateServiceConfigRequest struct {
ProviderAccountID string `json:"providerAccountId"`
ServiceConfig *ServiceConfig `json:"serviceConfig"`
}
)
type ServiceConfig struct {
AWS *AWSServiceConfig `json:"aws,omitempty"`
}
type AWSServiceConfig struct {
Logs *AWSServiceLogsConfig `json:"logs"`
Metrics *AWSServiceMetricsConfig `json:"metrics"`
}
// AWSServiceLogsConfig is AWS specific logs config for a service
// NOTE: the JSON keys are snake case for backward compatibility with existing agents
type AWSServiceLogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
type AWSServiceMetricsConfig struct {
Enabled bool `json:"enabled"`
}
// ServiceDefinitionMetadata represents service definition metadata. This is useful for showing the service overview
type ServiceDefinitionMetadata struct {
Id string `json:"id"`
Title string `json:"title"`
Icon string `json:"icon"`
}
type ServiceDefinition struct {
ServiceDefinitionMetadata
Overview string `json:"overview"` // markdown
Assets Assets `json:"assets"`
SupportedSignals SupportedSignals `json:"supported_signals"`
DataCollected DataCollected `json:"dataCollected"`
Strategy *CollectionStrategy `json:"telemetryCollectionStrategy"`
}
// CollectionStrategy is cloud provider specific configuration for signal collection.
// The agent uses it to work out the details of collecting telemetry for the cloud provider.
type CollectionStrategy struct {
AWS *AWSCollectionStrategy `json:"aws,omitempty"`
}
// Assets represents the collection of dashboards
type Assets struct {
Dashboards []Dashboard `json:"dashboards"`
}
// SupportedSignals for cloud provider's service
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
// DataCollected is a curated static list of metrics and logs, shown as part of the service overview
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
// CollectedLogAttribute represents a log attribute that is present in all log entries for a service;
// it is shown as part of the service overview
type CollectedLogAttribute struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
// CollectedMetric represents a metric that is collected for a service; it is shown as part of the service overview
type CollectedMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Unit string `json:"unit"`
Description string `json:"description"`
}
// AWSCollectionStrategy represents the signal collection strategy for AWS services;
// this is AWS specific.
// NOTE: this structure still uses snake_case keys for backward compatibility
// with existing agents
type AWSCollectionStrategy struct {
Metrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
Logs *AWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}
// AWSMetricsStrategy represents the metrics collection strategy for AWS services;
// this is AWS specific.
// NOTE: this structure still uses snake_case keys for backward compatibility
// with existing agents
type AWSMetricsStrategy struct {
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
StreamFilters []struct {
// json tags here are in the shape expected by AWS API as detailed at
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
}
// AWSLogsStrategy represents the logs collection strategy for AWS services;
// this is AWS specific.
// NOTE: this structure still uses snake_case keys for backward compatibility
// with existing agents
type AWSLogsStrategy struct {
Subscriptions []struct {
// subscribe to all logs groups with specified prefix.
// eg: `/aws/rds/`
LogGroupNamePrefix string `json:"log_group_name_prefix"`
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
// "" implies no filtering is required.
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
}
// Dashboard represents a dashboard definition for cloud integration.
type Dashboard struct {
Id string `json:"id"`
Url string `json:"url"`
Title string `json:"title"`
Description string `json:"description"`
Image string `json:"image"`
Definition *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
// UTILS
// GetCloudIntegrationDashboardID returns the dashboard id for a cloud integration, given the cloud provider, service id, and dashboard id.
// This is used to generate unique dashboard ids for cloud integrations, and to parse a dashboard id back into its cloud provider and service id when needed.
func GetCloudIntegrationDashboardID(cloudProvider CloudProviderType, svcId, dashboardId string) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
// GetDashboardsFromAssets returns the list of dashboards for the cloud provider service from the definition assets
func GetDashboardsFromAssets(
svcId string,
orgID valuer.UUID,
cloudProvider CloudProviderType,
createdAt *time.Time,
assets Assets,
) []*dashboardtypes.Dashboard {
dashboards := make([]*dashboardtypes.Dashboard, 0)
for _, d := range assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
dashboards = append(dashboards, &dashboardtypes.Dashboard{
ID: GetCloudIntegrationDashboardID(cloudProvider, svcId, d.Id),
Locked: true,
OrgID: orgID,
Data: *d.Definition,
TimeAuditable: types.TimeAuditable{
CreatedAt: *createdAt,
UpdatedAt: *createdAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: author,
UpdatedBy: author,
},
})
}
return dashboards
}

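Illustrative snippet of the ID round trip described above (assumes CloudProviderType formats as its underlying string via valuer.String):

id := GetCloudIntegrationDashboardID(CloudProviderTypeAWS, "rds", "overview")
// id == "cloud-integration--aws--rds--overview"

// Parsing back out is a split on the same separator:
parts := strings.Split(id, "--") // ["cloud-integration", "aws", "rds", "overview"]
_ = parts
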
View File

@@ -0,0 +1,35 @@
package cloudintegrationtypes
import (
"context"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Store interface {
// GetAccountByID returns a cloud integration account by id
GetAccountByID(ctx context.Context, orgID, id valuer.UUID, provider CloudProviderType) (*StorableCloudIntegration, error)
// UpsertAccount creates or updates a cloud integration account
UpsertAccount(ctx context.Context, account *StorableCloudIntegration) error
// RemoveAccount marks a cloud integration account as removed by setting the RemovedAt field
RemoveAccount(ctx context.Context, orgID, id valuer.UUID, provider CloudProviderType) error
// GetConnectedAccounts returns all the cloud integration accounts for the org and cloud provider
GetConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider CloudProviderType) ([]*StorableCloudIntegration, error)
// GetConnectedAccount returns the cloud integration account for the given provider and provider account id
GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider CloudProviderType, providerAccountID string) (*StorableCloudIntegration, error)
// cloud_integration_service related methods
// GetServiceByType returns the cloud integration service for the given cloud integration id and service type
GetServiceByType(ctx context.Context, cloudIntegrationID valuer.UUID, serviceType string) (*StorableCloudIntegrationService, error)
// UpsertService creates or updates a cloud integration service for the given cloud integration id and service type
UpsertService(ctx context.Context, service *StorableCloudIntegrationService) error
// GetServices returns all the cloud integration services for the given cloud integration id
GetServices(ctx context.Context, cloudIntegrationID valuer.UUID) ([]*StorableCloudIntegrationService, error)
}

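A hedged sketch of how a module might compose these methods on agent check-in; isNotFound is a hypothetical helper matching ErrCodeCloudIntegrationNotFound, and ID/timestamp bookkeeping is elided:

func upsertOnCheckIn(ctx context.Context, store Store, orgID valuer.UUID, provider CloudProviderType, providerAccountID string) error {
	account, err := store.GetConnectedAccount(ctx, orgID, provider, providerAccountID)
	if err != nil && !isNotFound(err) {
		return err
	}
	if account == nil {
		account = &StorableCloudIntegration{
			Provider:  provider,
			AccountID: &providerAccountID,
			OrgID:     orgID,
		}
	}
	// RemoveAccount only sets RemovedAt, so repeated upserts are safe here.
	return store.UpsertAccount(ctx, account)
}
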
View File

@@ -1,126 +0,0 @@
package exporttypes
import (
"strings"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
// ExportRawDataQueryParams represents the query parameters for the export raw data endpoint
type ExportRawDataQueryParams struct {
ExportRawDataFormatQueryParam
// Signal specifies the type of data to export: "logs" or "traces"
Signal telemetrytypes.Signal `query:"signal" enum:"logs,traces" required:"true" description:"The type of data to export."`
// Source specifies the type of data to export: "logs" or "traces"
// Deprecated: Use Signal instead.
Source string `query:"source" deprecated:"true" description:"Deprecated: use signal instead."`
// Start is the start time for the query (Unix timestamp in nanoseconds)
Start uint64 `query:"start" description:"The start time for the query in unix timestamp nanoseconds."`
// End is the end time for the query (Unix timestamp in nanoseconds)
End uint64 `query:"end" description:"The end time for the query in unix timestamp nanoseconds."`
// Limit specifies the maximum number of rows to export
Limit int `query:"limit,default=10000" default:"10000" minimum:"1" maximum:"50000" description:"The maximum number of rows to export."`
// Filter is a filter expression to apply to the query
// Deprecated: Use FilterExpression instead.
FilterString string `query:"filter" deprecated:"true" description:"Deprecated: use filterExpression instead."`
Filter qbtypes.Filter `query:"filterExpression" description:"The filter expression to apply to the query."`
// Columns specifies the columns to include in the export
// Format: ["context.field:type", "context.field", "field"]
// Deprecated: Use SelectFields instead.
Columns []string `query:"columns" deprecated:"true" description:"Deprecated: use selectFields instead."`
// SelectFields specifies the columns to include in the export
SelectFields []telemetrytypes.TelemetryFieldKey `query:"selectFields" description:"The columns to include in the export."`
// OrderBy specifies the sorting order
// Format: "column:direction" or "context.field:type:direction"
// Direction can be "asc" or "desc"
// Deprecated: Use Order instead.
OrderBy string `query:"order_by" deprecated:"true" description:"Deprecated: use order instead."`
// Order specifies the sorting order with keys and directions
Order []qbtypes.OrderBy `query:"order" description:"The sorting order with keys and directions."`
}
type ExportRawDataFormatQueryParam struct {
// Format specifies the output format: "csv" or "jsonl"
Format string `query:"format,default=csv" default:"csv" enum:"csv,jsonl" description:"The output format for the export."`
}
func (p *ExportRawDataQueryParams) Normalize() {
if len(p.Order) == 0 && len(p.OrderBy) > 0 {
p.Order = parseExportQueryOrderBy(p.OrderBy)
}
if len(p.SelectFields) == 0 && len(p.Columns) != 0 {
p.SelectFields = parseExportQueryColumns(p.Columns)
}
if len(p.Filter.Expression) == 0 && len(p.FilterString) > 0 {
p.Filter = qbtypes.Filter{Expression: p.FilterString}
}
if p.Signal == telemetrytypes.SignalUnspecified && p.Source != "" {
p.Signal = telemetrytypes.Signal{String: valuer.NewString(p.Source)}
}
}
func (p *ExportRawDataQueryParams) Validate() error {
if p.Signal != telemetrytypes.SignalLogs && p.Signal != telemetrytypes.SignalTraces {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid signal %s", p.Signal).WithAdditional("Allowed values: [logs, traces]")
}
return nil
}
// parseExportQueryColumns converts bound column strings to TelemetryFieldKey structs.
// Each column should be in the format "context.field:type" or "context.field" or "field"
func parseExportQueryColumns(columnParams []string) []telemetrytypes.TelemetryFieldKey {
columns := make([]telemetrytypes.TelemetryFieldKey, 0, len(columnParams))
for _, columnStr := range columnParams {
columnStr = strings.TrimSpace(columnStr)
if columnStr == "" {
continue
}
columns = append(columns, telemetrytypes.GetFieldKeyFromKeyText(columnStr))
}
return columns
}
// parseExportQueryOrderBy converts a bound order_by string to an OrderBy slice.
// The string should be in the format "column:direction"; validation is handled downstream.
func parseExportQueryOrderBy(orderByParam string) []qbtypes.OrderBy {
orderByParam = strings.TrimSpace(orderByParam)
if orderByParam == "" {
return []qbtypes.OrderBy{}
}
parts := strings.Split(orderByParam, ":")
// Here we silently ignore malformed input, as this is a deprecated code path
if len(parts) < 2 {
return []qbtypes.OrderBy{}
}
column := strings.Join(parts[:len(parts)-1], ":")
direction := parts[len(parts)-1]
return []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.GetFieldKeyFromKeyText(column),
},
Direction: qbtypes.OrderDirectionMap[direction],
},
}
}

View File

@@ -1,174 +0,0 @@
package exporttypes
import (
"testing"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/stretchr/testify/assert"
)
func TestParseExportQueryColumns(t *testing.T) {
tests := []struct {
name string
input []string
expectedColumns []telemetrytypes.TelemetryFieldKey
}{
{
name: "empty input",
input: []string{},
expectedColumns: []telemetrytypes.TelemetryFieldKey{},
},
{
name: "single column",
input: []string{"timestamp"},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
},
},
{
name: "multiple columns",
input: []string{"timestamp", "message", "level"},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "message"},
{Name: "level"},
},
},
{
name: "empty entry is skipped",
input: []string{"timestamp", "", "level"},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "level"},
},
},
{
name: "whitespace-only entry is skipped",
input: []string{"timestamp", " ", "level"},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "timestamp"},
{Name: "level"},
},
},
{
name: "column with context and type",
input: []string{"attribute.user:string"},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "user", FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
},
},
{
name: "column with context, dot-notation name",
input: []string{"attribute.user.string"},
expectedColumns: []telemetrytypes.TelemetryFieldKey{
{Name: "user.string", FieldContext: telemetrytypes.FieldContextAttribute},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
columns := parseExportQueryColumns(tt.input)
assert.Equal(t, len(tt.expectedColumns), len(columns))
for i, expected := range tt.expectedColumns {
assert.Equal(t, expected, columns[i])
}
})
}
}
func TestParseExportQueryOrderBy(t *testing.T) {
tests := []struct {
name string
input string
expectedOrder []qbtypes.OrderBy
}{
{
name: "empty string returns empty slice",
input: "",
expectedOrder: []qbtypes.OrderBy{},
},
{
name: "simple column asc",
input: "timestamp:asc",
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp"},
},
},
},
},
{
name: "simple column desc",
input: "timestamp:desc",
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp"},
},
},
},
},
{
name: "column with context and type qualifier",
input: "attribute.user:string:desc",
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "user",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
},
},
},
{
name: "column with context, dot-notation name",
input: "attribute.user.string:desc",
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionDesc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "user.string",
FieldContext: telemetrytypes.FieldContextAttribute,
},
},
},
},
},
{
name: "resource with context and type",
input: "resource.service.name:string:asc",
expectedOrder: []qbtypes.OrderBy{
{
Direction: qbtypes.OrderDirectionAsc,
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
order := parseExportQueryOrderBy(tt.input)
assert.Equal(t, len(tt.expectedOrder), len(order))
for i, expected := range tt.expectedOrder {
assert.Equal(t, expected, order[i])
}
})
}
}

View File

@@ -393,74 +393,6 @@ func (r *QueryRangeRequest) HasOrderSpecified() bool {
return false
}
// UseDefaultOrderBy applies UseDefaultOrderByForListQuery to every query in the
// composite query when the request type is a list query (raw, raw_stream, trace).
func (r *QueryRangeRequest) UseDefaultOrderBy() {
if r.RequestType.IsAggregation() {
return
}
for idx := range r.CompositeQuery.Queries {
r.CompositeQuery.Queries[idx].UseDefaultOrderByForListQuery()
}
}
// UseDefaultOrderByForListQuery applies a default timestamp-descending order
// for list/raw queries when no explicit order is specified. This is intended
// for raw data listing endpoints (e.g. export, list views) where a sensible
// default sort is needed, not for aggregation or timeseries queries.
func (q *QueryEnvelope) UseDefaultOrderByForListQuery() {
if len(q.GetOrder()) > 0 {
return
}
switch q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation],
QueryBuilderTraceOperator:
q.SetOrder(
[]OrderBy{
{
Key: OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "timestamp",
Signal: telemetrytypes.SignalTraces,
FieldContext: telemetrytypes.FieldContextSpan,
FieldDataType: telemetrytypes.FieldDataTypeNumber,
},
},
Direction: OrderDirectionDesc,
},
},
)
case QueryBuilderQuery[LogAggregation]:
q.SetOrder(
[]OrderBy{
{
Key: OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "timestamp",
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextLog,
FieldDataType: telemetrytypes.FieldDataTypeNumber,
},
},
Direction: OrderDirectionDesc,
},
{
Key: OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "id",
Signal: telemetrytypes.SignalLogs,
FieldContext: telemetrytypes.FieldContextLog,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Direction: OrderDirectionDesc,
},
},
)
}
}
func (r *QueryRangeRequest) FuncsForQuery(name string) []Function {
funcs := []Function{}
for _, query := range r.CompositeQuery.Queries {
@@ -505,16 +437,6 @@ func (r *QueryRangeRequest) IsAnomalyRequest() (*QueryBuilderQuery[MetricAggrega
return &q, hasAnomaly
}
func (r *QueryRangeRequest) TraceOperatorQueryIndex() int {
for idx, query := range r.CompositeQuery.Queries {
switch query.Spec.(type) {
case QueryBuilderTraceOperator:
return idx
}
}
return -1
}
// We do not support fill gaps for these queries. Maybe support in future?
func (r *QueryRangeRequest) SkipFillGaps(name string) bool {
for _, query := range r.CompositeQuery.Queries {

View File

@@ -1,379 +0,0 @@
package querybuildertypesv5
import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
// GetExpression returns the expression string.
func (q *QueryEnvelope) GetExpression() string {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Expression
case QueryBuilderFormula:
return spec.Expression
}
return ""
}
// GetReturnSpansFrom returns the return-spans-from value.
func (q *QueryEnvelope) GetReturnSpansFrom() string {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.ReturnSpansFrom
}
return ""
}
// GetSignal returns the signal.
func (q *QueryEnvelope) GetSignal() telemetrytypes.Signal {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Signal
case QueryBuilderQuery[LogAggregation]:
return spec.Signal
case QueryBuilderQuery[MetricAggregation]:
return spec.Signal
}
return telemetrytypes.SignalUnspecified
}
// GetSource returns the source.
func (q *QueryEnvelope) GetSource() telemetrytypes.Source {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Source
case QueryBuilderQuery[LogAggregation]:
return spec.Source
case QueryBuilderQuery[MetricAggregation]:
return spec.Source
}
return telemetrytypes.SourceUnspecified
}
// GetQuery returns the raw query string.
func (q *QueryEnvelope) GetQuery() string {
switch spec := q.Spec.(type) {
case PromQuery:
return spec.Query
case ClickHouseQuery:
return spec.Query
}
return ""
}
// GetStats returns the PromQL stats flag.
func (q *QueryEnvelope) GetStats() bool {
switch spec := q.Spec.(type) {
case PromQuery:
return spec.Stats
}
return false
}
// GetLeft returns the left query reference of a join.
func (q *QueryEnvelope) GetLeft() QueryRef {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
return spec.Left
}
return QueryRef{}
}
// GetRight returns the right query reference of a join.
func (q *QueryEnvelope) GetRight() QueryRef {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
return spec.Right
}
return QueryRef{}
}
// GetJoinType returns the join type.
func (q *QueryEnvelope) GetJoinType() JoinType {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
return spec.Type
}
return JoinType{}
}
// GetOn returns the join ON condition.
func (q *QueryEnvelope) GetOn() string {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
return spec.On
}
return ""
}
// GetQueryName returns the name of the spec.
func (q *QueryEnvelope) GetQueryName() string {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Name
case QueryBuilderQuery[TraceAggregation]:
return spec.Name
case QueryBuilderQuery[LogAggregation]:
return spec.Name
case QueryBuilderQuery[MetricAggregation]:
return spec.Name
case QueryBuilderFormula:
return spec.Name
case QueryBuilderJoin:
return spec.Name
case PromQuery:
return spec.Name
case ClickHouseQuery:
return spec.Name
}
return ""
}
// IsDisabled returns whether the spec is disabled.
func (q *QueryEnvelope) IsDisabled() bool {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Disabled
case QueryBuilderQuery[TraceAggregation]:
return spec.Disabled
case QueryBuilderQuery[LogAggregation]:
return spec.Disabled
case QueryBuilderQuery[MetricAggregation]:
return spec.Disabled
case QueryBuilderFormula:
return spec.Disabled
case QueryBuilderJoin:
return spec.Disabled
case PromQuery:
return spec.Disabled
case ClickHouseQuery:
return spec.Disabled
}
return false
}
// GetLimit returns the row limit.
func (q *QueryEnvelope) GetLimit() int {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Limit
case QueryBuilderQuery[TraceAggregation]:
return spec.Limit
case QueryBuilderQuery[LogAggregation]:
return spec.Limit
case QueryBuilderQuery[MetricAggregation]:
return spec.Limit
case QueryBuilderFormula:
return spec.Limit
case QueryBuilderJoin:
return spec.Limit
}
return 0
}
// GetOffset returns the row offset.
func (q *QueryEnvelope) GetOffset() int {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Offset
case QueryBuilderQuery[TraceAggregation]:
return spec.Offset
case QueryBuilderQuery[LogAggregation]:
return spec.Offset
case QueryBuilderQuery[MetricAggregation]:
return spec.Offset
}
return 0
}
// GetType returns the QueryType of the envelope.
func (q *QueryEnvelope) GetType() QueryType {
return q.Type
}
// GetOrder returns the order-by clauses.
func (q *QueryEnvelope) GetOrder() []OrderBy {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Order
case QueryBuilderQuery[TraceAggregation]:
return spec.Order
case QueryBuilderQuery[LogAggregation]:
return spec.Order
case QueryBuilderQuery[MetricAggregation]:
return spec.Order
case QueryBuilderFormula:
return spec.Order
case QueryBuilderJoin:
return spec.Order
}
return nil
}
// GetGroupBy returns the group-by keys.
func (q *QueryEnvelope) GetGroupBy() []GroupByKey {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.GroupBy
case QueryBuilderQuery[TraceAggregation]:
return spec.GroupBy
case QueryBuilderQuery[LogAggregation]:
return spec.GroupBy
case QueryBuilderQuery[MetricAggregation]:
return spec.GroupBy
case QueryBuilderJoin:
return spec.GroupBy
}
return nil
}
// GetFilter returns the filter.
func (q *QueryEnvelope) GetFilter() *Filter {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Filter
case QueryBuilderQuery[TraceAggregation]:
return spec.Filter
case QueryBuilderQuery[LogAggregation]:
return spec.Filter
case QueryBuilderQuery[MetricAggregation]:
return spec.Filter
case QueryBuilderJoin:
return spec.Filter
}
return nil
}
// GetHaving returns the having clause.
func (q *QueryEnvelope) GetHaving() *Having {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Having
case QueryBuilderQuery[TraceAggregation]:
return spec.Having
case QueryBuilderQuery[LogAggregation]:
return spec.Having
case QueryBuilderQuery[MetricAggregation]:
return spec.Having
case QueryBuilderFormula:
return spec.Having
case QueryBuilderJoin:
return spec.Having
}
return nil
}
// GetFunctions returns the post-processing functions.
func (q *QueryEnvelope) GetFunctions() []Function {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Functions
case QueryBuilderQuery[TraceAggregation]:
return spec.Functions
case QueryBuilderQuery[LogAggregation]:
return spec.Functions
case QueryBuilderQuery[MetricAggregation]:
return spec.Functions
case QueryBuilderFormula:
return spec.Functions
case QueryBuilderJoin:
return spec.Functions
}
return nil
}
// GetSelectFields returns the selected fields.
func (q *QueryEnvelope) GetSelectFields() []telemetrytypes.TelemetryFieldKey {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.SelectFields
case QueryBuilderQuery[TraceAggregation]:
return spec.SelectFields
case QueryBuilderQuery[LogAggregation]:
return spec.SelectFields
case QueryBuilderQuery[MetricAggregation]:
return spec.SelectFields
case QueryBuilderJoin:
return spec.SelectFields
}
return nil
}
// GetLegend returns the legend label.
func (q *QueryEnvelope) GetLegend() string {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Legend
case QueryBuilderQuery[TraceAggregation]:
return spec.Legend
case QueryBuilderQuery[LogAggregation]:
return spec.Legend
case QueryBuilderQuery[MetricAggregation]:
return spec.Legend
case QueryBuilderFormula:
return spec.Legend
case PromQuery:
return spec.Legend
case ClickHouseQuery:
return spec.Legend
}
return ""
}
// GetCursor returns the pagination cursor.
func (q *QueryEnvelope) GetCursor() string {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.Cursor
case QueryBuilderQuery[TraceAggregation]:
return spec.Cursor
case QueryBuilderQuery[LogAggregation]:
return spec.Cursor
case QueryBuilderQuery[MetricAggregation]:
return spec.Cursor
}
return ""
}
// GetStepInterval returns the step interval.
func (q *QueryEnvelope) GetStepInterval() Step {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
return spec.StepInterval
case QueryBuilderQuery[TraceAggregation]:
return spec.StepInterval
case QueryBuilderQuery[LogAggregation]:
return spec.StepInterval
case QueryBuilderQuery[MetricAggregation]:
return spec.StepInterval
case PromQuery:
return spec.Step
}
return Step{}
}
// GetSecondaryAggregations returns the secondary aggregations.
func (q *QueryEnvelope) GetSecondaryAggregations() []SecondaryAggregation {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.SecondaryAggregations
case QueryBuilderQuery[LogAggregation]:
return spec.SecondaryAggregations
case QueryBuilderQuery[MetricAggregation]:
return spec.SecondaryAggregations
case QueryBuilderJoin:
return spec.SecondaryAggregations
}
return nil
}
// GetLimitBy returns the limit-by configuration.
func (q *QueryEnvelope) GetLimitBy() *LimitBy {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.LimitBy
case QueryBuilderQuery[LogAggregation]:
return spec.LimitBy
case QueryBuilderQuery[MetricAggregation]:
return spec.LimitBy
}
return nil
}

View File

@@ -1,452 +0,0 @@
package querybuildertypesv5
import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
// SetExpression sets the expression string of the spec, if applicable.
func (q *QueryEnvelope) SetExpression(expression string) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Expression = expression
q.Spec = spec
case QueryBuilderFormula:
spec.Expression = expression
q.Spec = spec
}
}
// SetReturnSpansFrom sets the return-spans-from value, if applicable.
func (q *QueryEnvelope) SetReturnSpansFrom(returnSpansFrom string) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.ReturnSpansFrom = returnSpansFrom
q.Spec = spec
}
}
// SetSignal sets the signal of the spec, if applicable.
func (q *QueryEnvelope) SetSignal(signal telemetrytypes.Signal) {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
spec.Signal = signal
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Signal = signal
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Signal = signal
q.Spec = spec
}
}
// SetSource sets the source of the spec, if applicable.
func (q *QueryEnvelope) SetSource(source telemetrytypes.Source) {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
spec.Source = source
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Source = source
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Source = source
q.Spec = spec
}
}
// SetQuery sets the raw query string of the spec, if applicable.
func (q *QueryEnvelope) SetQuery(query string) {
switch spec := q.Spec.(type) {
case PromQuery:
spec.Query = query
q.Spec = spec
case ClickHouseQuery:
spec.Query = query
q.Spec = spec
}
}
// SetStats sets the PromQL stats flag, if applicable.
func (q *QueryEnvelope) SetStats(stats bool) {
switch spec := q.Spec.(type) {
case PromQuery:
spec.Stats = stats
q.Spec = spec
}
}
// SetLeft sets the left query reference of a join, if applicable.
func (q *QueryEnvelope) SetLeft(left QueryRef) {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
spec.Left = left
q.Spec = spec
}
}
// SetRight sets the right query reference of a join, if applicable.
func (q *QueryEnvelope) SetRight(right QueryRef) {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
spec.Right = right
q.Spec = spec
}
}
// SetJoinType sets the join type, if applicable.
func (q *QueryEnvelope) SetJoinType(joinType JoinType) {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
spec.Type = joinType
q.Spec = spec
}
}
// SetOn sets the join ON condition, if applicable.
func (q *QueryEnvelope) SetOn(on string) {
switch spec := q.Spec.(type) {
case QueryBuilderJoin:
spec.On = on
q.Spec = spec
}
}
// SetQueryName sets the name of the spec, if applicable.
func (q *QueryEnvelope) SetQueryName(name string) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Name = name
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Name = name
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Name = name
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Name = name
q.Spec = spec
case QueryBuilderFormula:
spec.Name = name
q.Spec = spec
case QueryBuilderJoin:
spec.Name = name
q.Spec = spec
case PromQuery:
spec.Name = name
q.Spec = spec
case ClickHouseQuery:
spec.Name = name
q.Spec = spec
}
}
// SetDisabled sets the disabled flag of the spec, if applicable.
func (q *QueryEnvelope) SetDisabled(disabled bool) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Disabled = disabled
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Disabled = disabled
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Disabled = disabled
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Disabled = disabled
q.Spec = spec
case QueryBuilderFormula:
spec.Disabled = disabled
q.Spec = spec
case QueryBuilderJoin:
spec.Disabled = disabled
q.Spec = spec
case PromQuery:
spec.Disabled = disabled
q.Spec = spec
case ClickHouseQuery:
spec.Disabled = disabled
q.Spec = spec
}
}
// SetLimit sets the row limit of the spec, if applicable.
func (q *QueryEnvelope) SetLimit(limit int) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Limit = limit
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Limit = limit
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Limit = limit
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Limit = limit
q.Spec = spec
case QueryBuilderFormula:
spec.Limit = limit
q.Spec = spec
case QueryBuilderJoin:
spec.Limit = limit
q.Spec = spec
}
}
// SetOffset sets the row offset of the spec, if applicable.
func (q *QueryEnvelope) SetOffset(offset int) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Offset = offset
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Offset = offset
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Offset = offset
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Offset = offset
q.Spec = spec
}
}
// SetType sets the QueryType of the envelope.
func (q *QueryEnvelope) SetType(t QueryType) {
q.Type = t
}
// SetOrder sets the order-by clauses of the spec, if applicable.
func (q *QueryEnvelope) SetOrder(order []OrderBy) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Order = order
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Order = order
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Order = order
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Order = order
q.Spec = spec
case QueryBuilderFormula:
spec.Order = order
q.Spec = spec
case QueryBuilderJoin:
spec.Order = order
q.Spec = spec
}
}
// SetGroupBy sets the group-by keys of the spec, if applicable.
func (q *QueryEnvelope) SetGroupBy(groupBy []GroupByKey) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.GroupBy = groupBy
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.GroupBy = groupBy
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.GroupBy = groupBy
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.GroupBy = groupBy
q.Spec = spec
case QueryBuilderJoin:
spec.GroupBy = groupBy
q.Spec = spec
}
}
// SetFilter sets the filter of the spec, if applicable.
func (q *QueryEnvelope) SetFilter(filter *Filter) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Filter = filter
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Filter = filter
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Filter = filter
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Filter = filter
q.Spec = spec
case QueryBuilderJoin:
spec.Filter = filter
q.Spec = spec
}
}
// SetHaving sets the having clause of the spec, if applicable.
func (q *QueryEnvelope) SetHaving(having *Having) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Having = having
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Having = having
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Having = having
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Having = having
q.Spec = spec
case QueryBuilderFormula:
spec.Having = having
q.Spec = spec
case QueryBuilderJoin:
spec.Having = having
q.Spec = spec
}
}
// SetFunctions sets the post-processing functions of the spec, if applicable.
func (q *QueryEnvelope) SetFunctions(functions []Function) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Functions = functions
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Functions = functions
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Functions = functions
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Functions = functions
q.Spec = spec
case QueryBuilderFormula:
spec.Functions = functions
q.Spec = spec
case QueryBuilderJoin:
spec.Functions = functions
q.Spec = spec
}
}
// SetSelectFields sets the selected fields of the spec, if applicable.
func (q *QueryEnvelope) SetSelectFields(fields []telemetrytypes.TelemetryFieldKey) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.SelectFields = fields
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.SelectFields = fields
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.SelectFields = fields
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.SelectFields = fields
q.Spec = spec
case QueryBuilderJoin:
spec.SelectFields = fields
q.Spec = spec
}
}
// SetLegend sets the legend label of the spec, if applicable.
func (q *QueryEnvelope) SetLegend(legend string) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Legend = legend
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Legend = legend
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Legend = legend
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Legend = legend
q.Spec = spec
case QueryBuilderFormula:
spec.Legend = legend
q.Spec = spec
case PromQuery:
spec.Legend = legend
q.Spec = spec
case ClickHouseQuery:
spec.Legend = legend
q.Spec = spec
}
}
// SetCursor sets the pagination cursor of the spec, if applicable.
func (q *QueryEnvelope) SetCursor(cursor string) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.Cursor = cursor
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.Cursor = cursor
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.Cursor = cursor
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.Cursor = cursor
q.Spec = spec
}
}
// SetStepInterval sets the step interval of the spec, if applicable.
func (q *QueryEnvelope) SetStepInterval(step Step) {
switch spec := q.Spec.(type) {
case QueryBuilderTraceOperator:
spec.StepInterval = step
q.Spec = spec
case QueryBuilderQuery[TraceAggregation]:
spec.StepInterval = step
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.StepInterval = step
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.StepInterval = step
q.Spec = spec
case PromQuery:
spec.Step = step
q.Spec = spec
}
}
// SetSecondaryAggregations sets the secondary aggregations of the spec, if applicable.
func (q *QueryEnvelope) SetSecondaryAggregations(secondaryAggregations []SecondaryAggregation) {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
spec.SecondaryAggregations = secondaryAggregations
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.SecondaryAggregations = secondaryAggregations
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.SecondaryAggregations = secondaryAggregations
q.Spec = spec
case QueryBuilderJoin:
spec.SecondaryAggregations = secondaryAggregations
q.Spec = spec
}
}
// SetLimitBy sets the limit-by configuration of the spec, if applicable.
func (q *QueryEnvelope) SetLimitBy(limitBy *LimitBy) {
switch spec := q.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
spec.LimitBy = limitBy
q.Spec = spec
case QueryBuilderQuery[LogAggregation]:
spec.LimitBy = limitBy
q.Spec = spec
case QueryBuilderQuery[MetricAggregation]:
spec.LimitBy = limitBy
q.Spec = spec
}
}

View File

@@ -10,9 +10,55 @@ import (
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
// queryName returns the name from any query envelope spec type.
func (e QueryEnvelope) queryName() string {
switch spec := e.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Name
case QueryBuilderQuery[LogAggregation]:
return spec.Name
case QueryBuilderQuery[MetricAggregation]:
return spec.Name
case QueryBuilderFormula:
return spec.Name
case QueryBuilderTraceOperator:
return spec.Name
case QueryBuilderJoin:
return spec.Name
case PromQuery:
return spec.Name
case ClickHouseQuery:
return spec.Name
}
return ""
}
// isDisabled returns the disabled status from any query envelope spec type.
func (e QueryEnvelope) isDisabled() bool {
switch spec := e.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Disabled
case QueryBuilderQuery[LogAggregation]:
return spec.Disabled
case QueryBuilderQuery[MetricAggregation]:
return spec.Disabled
case QueryBuilderFormula:
return spec.Disabled
case QueryBuilderTraceOperator:
return spec.Disabled
case QueryBuilderJoin:
return spec.Disabled
case PromQuery:
return spec.Disabled
case ClickHouseQuery:
return spec.Disabled
}
return false
}
// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
name := envelope.GetQueryName()
name := envelope.queryName()
var typeLabel string
switch envelope.Type {
@@ -43,115 +89,50 @@ const (
MaxQueryLimit = 10000
)
// ValidationOption is a functional option for configuring validation behaviour.
type ValidationOption func(*validationConfig)
type validationConfig struct {
skipLimitOffsetValidation bool
skipAggregationValidation bool
skipHavingValidation bool
skipAggregationOrderBy bool
skipSelectFieldValidation bool
skipGroupByValidation bool
}
func applyValidationOptions(opts []ValidationOption) validationConfig {
cfg := validationConfig{}
for _, opt := range opts {
opt(&cfg)
}
return cfg
}
// SkipLimitOffsetValidation returns a ValidationOption that skips the limit and offset range checks.
// Use this when the caller has already validated limits and offsets with different constraints.
func WithSkipLimitOffsetValidation() ValidationOption {
return func(cfg *validationConfig) {
cfg.skipLimitOffsetValidation = true
}
}
// SkipAggregationValidation skips aggregation validation.
// Used for raw/trace request types where aggregations are not required.
func WithSkipAggregationValidation() ValidationOption {
return func(cfg *validationConfig) {
cfg.skipAggregationValidation = true
}
}
// SkipHavingValidation skips having-clause validation.
// Used for raw/trace request types where having clauses do not apply.
func WithSkipHavingValidation() ValidationOption {
return func(cfg *validationConfig) {
cfg.skipHavingValidation = true
}
}
// SkipAggregationOrderBy skips the aggregation-specific order-by key validation.
// Used for raw/trace request types where order-by keys are not restricted to group-by or aggregation keys.
func WithSkipAggregationOrderBy() ValidationOption {
return func(cfg *validationConfig) {
cfg.skipAggregationOrderBy = true
}
}
// SkipSelectFieldValidation skips select-field validation.
// Used for aggregation request types where select fields do not apply.
func WithSkipSelectFieldValidation() ValidationOption {
return func(cfg *validationConfig) {
cfg.skipSelectFieldValidation = true
}
}
// SkipGroupByValidation skips group-by validation.
// Used for raw/trace request types where group-by does not apply.
func WithSkipGroupByValidation() ValidationOption {
return func(cfg *validationConfig) {
cfg.skipGroupByValidation = true
}
}
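
For contrast with the new request-type parameter, the removed options were composed at call sites roughly like this (a sketch using only identifiers removed in this diff; the stacking matches the old getValidationOptions mapping for raw/trace requests):

```go
// Old call style (removed below): raw/trace callers stacked skip options.
opts := []ValidationOption{
	WithSkipAggregationValidation(),
	WithSkipHavingValidation(),
	WithSkipAggregationOrderBy(),
	WithSkipGroupByValidation(),
}
if err := q.Validate(opts...); err != nil {
	// the query failed one of the checks that was not skipped
}
```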
// Validate performs preliminary validation on QueryBuilderQuery.
func (q *QueryBuilderQuery[T]) Validate(opts ...ValidationOption) error {
cfg := applyValidationOptions(opts)
// Validate performs preliminary validation on QueryBuilderQuery.
func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
// Validate signal
if err := q.validateSignal(); err != nil {
return err
}
if err := q.validateAggregations(cfg); err != nil {
if err := q.validateAggregations(requestType); err != nil {
return err
}
if err := q.validateGroupBy(cfg); err != nil {
if err := q.validateGroupBy(requestType); err != nil {
return err
}
if err := q.validateLimitAndPagination(cfg); err != nil {
// Validate limit and pagination
if err := q.validateLimitAndPagination(); err != nil {
return err
}
// Validate functions
if err := q.validateFunctions(); err != nil {
return err
}
// Validate secondary aggregations
if err := q.validateSecondaryAggregations(); err != nil {
return err
}
if err := q.validateOrderBy(cfg); err != nil {
if err := q.validateOrderBy(requestType); err != nil {
return err
}
if err := q.validateSelectFields(cfg); err != nil {
if err := q.validateSelectFields(requestType); err != nil {
return err
}
return nil
}
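
With the new signature the request type itself selects which checks apply; the updated tests further down show the call shape:

```go
// New call style: one argument, no option composition.
if err := q.Validate(RequestTypeTimeSeries); err != nil {
	// aggregation, group-by and aggregation order-by checks ran;
	// select-field validation was skipped
}
if err := q.Validate(RequestTypeRaw); err != nil {
	// select-field validation ran; aggregation-specific checks were skipped
}
```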
func (q *QueryBuilderQuery[T]) validateSelectFields(cfg validationConfig) error {
if cfg.skipSelectFieldValidation {
func (q *QueryBuilderQuery[T]) validateSelectFields(requestType RequestType) error {
// selectFields don't apply to aggregation queries, skip validation
if requestType.IsAggregation() {
return nil
}
@@ -167,8 +148,9 @@ func (q *QueryBuilderQuery[T]) validateSelectFields(cfg validationConfig) error
return nil
}
func (q *QueryBuilderQuery[T]) validateGroupBy(cfg validationConfig) error {
if cfg.skipGroupByValidation {
func (q *QueryBuilderQuery[T]) validateGroupBy(requestType RequestType) error {
// groupBy doesn't apply to non-aggregation queries, skip validation
if !requestType.IsAggregation() {
return nil
}
for idx, item := range q.GroupBy {
@@ -201,8 +183,9 @@ func (q *QueryBuilderQuery[T]) validateSignal() error {
}
}
func (q *QueryBuilderQuery[T]) validateAggregations(cfg validationConfig) error {
if cfg.skipAggregationValidation {
func (q *QueryBuilderQuery[T]) validateAggregations(requestType RequestType) error {
// aggregations don't apply to non-aggregation queries, skip validation
if !requestType.IsAggregation() {
return nil
}
@@ -289,11 +272,8 @@ func (q *QueryBuilderQuery[T]) validateAggregations(cfg validationConfig) error
return nil
}
func (q *QueryBuilderQuery[T]) validateLimitAndPagination(cfg validationConfig) error {
if cfg.skipLimitOffsetValidation {
return nil
}
func (q *QueryBuilderQuery[T]) validateLimitAndPagination() error {
// Validate limit
if q.Limit < 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
@@ -356,7 +336,7 @@ func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
return nil
}
func (q *QueryBuilderQuery[T]) validateOrderBy(cfg validationConfig) error {
func (q *QueryBuilderQuery[T]) validateOrderBy(requestType RequestType) error {
for i, order := range q.Order {
// Direction values are constrained by the OrderDirection type; check defensively here
if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
@@ -375,7 +355,8 @@ func (q *QueryBuilderQuery[T]) validateOrderBy(cfg validationConfig) error {
}
}
if !cfg.skipAggregationOrderBy {
// aggregation-specific order key validation only applies to aggregation queries
if requestType.IsAggregation() {
return q.validateOrderByForAggregation()
}
@@ -457,8 +438,8 @@ func (q *QueryBuilderQuery[T]) validateOrderByForAggregation() error {
return nil
}
// Validate validates the entire query range request.
func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
// Validate validates the entire query range request
func (r *QueryRangeRequest) Validate() error {
// Validate time range
if r.RequestType != RequestTypeRawStream && r.Start >= r.End {
return errors.NewInvalidInputf(
@@ -469,8 +450,8 @@ func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
// Validate request type
switch r.RequestType {
case RequestTypeRaw, RequestTypeRawStream, RequestTypeTrace, RequestTypeTimeSeries, RequestTypeScalar:
opts = append(opts, getValidationOptions(r.RequestType)...)
case RequestTypeRaw, RequestTypeRawStream, RequestTypeTimeSeries, RequestTypeScalar, RequestTypeTrace:
// Valid request types
default:
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
@@ -482,7 +463,7 @@ func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
}
// Validate composite query
if err := r.CompositeQuery.Validate(opts...); err != nil {
if err := r.validateCompositeQuery(); err != nil {
return err
}
@@ -497,7 +478,7 @@ func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
// validateAllQueriesNotDisabled validates that at least one query in the composite query is enabled
func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
for _, envelope := range r.CompositeQuery.Queries {
if !envelope.IsDisabled() {
if !envelope.isDisabled() {
return nil
}
}
@@ -508,8 +489,12 @@ func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
)
}
func (r *QueryRangeRequest) validateCompositeQuery() error {
return r.CompositeQuery.Validate(r.RequestType)
}
// Validate performs validation on CompositeQuery
func (c *CompositeQuery) Validate(opts ...ValidationOption) error {
func (c *CompositeQuery) Validate(requestType RequestType) error {
if len(c.Queries) == 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
@@ -521,14 +506,14 @@ func (c *CompositeQuery) Validate(opts ...ValidationOption) error {
queryNames := make(map[string]bool)
for i, envelope := range c.Queries {
if err := validateQueryEnvelope(envelope, opts...); err != nil {
if err := validateQueryEnvelope(envelope, requestType); err != nil {
queryId := getQueryIdentifier(envelope, i)
return wrapValidationError(err, queryId, "invalid %s: %s")
}
// Check name uniqueness for builder queries
if envelope.Type == QueryTypeBuilder || envelope.Type == QueryTypeSubQuery {
name := envelope.GetQueryName()
name := envelope.queryName()
if name != "" {
if queryNames[name] {
return errors.NewInvalidInputf(
@@ -545,16 +530,16 @@ func (c *CompositeQuery) Validate(opts ...ValidationOption) error {
return nil
}
func validateQueryEnvelope(envelope QueryEnvelope, opts ...ValidationOption) error {
func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) error {
switch envelope.Type {
case QueryTypeBuilder, QueryTypeSubQuery:
switch spec := envelope.Spec.(type) {
case QueryBuilderQuery[TraceAggregation]:
return spec.Validate(opts...)
return spec.Validate(requestType)
case QueryBuilderQuery[LogAggregation]:
return spec.Validate(opts...)
return spec.Validate(requestType)
case QueryBuilderQuery[MetricAggregation]:
return spec.Validate(opts...)
return spec.Validate(requestType)
default:
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
@@ -640,14 +625,3 @@ func validateQueryEnvelope(envelope QueryEnvelope, opts ...ValidationOption) err
)
}
}
func getValidationOptions(requestType RequestType) []ValidationOption {
switch requestType {
case RequestTypeTimeSeries, RequestTypeScalar:
return []ValidationOption{WithSkipSelectFieldValidation()}
case RequestTypeRaw, RequestTypeRawStream, RequestTypeTrace:
return []ValidationOption{WithSkipAggregationValidation(), WithSkipHavingValidation(), WithSkipAggregationOrderBy(), WithSkipGroupByValidation()}
default:
return []ValidationOption{}
}
}
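
The table that getValidationOptions encoded now lives behind RequestType.IsAggregation(), whose body is not part of this diff. Inferred from the removed mapping above, it must behave roughly like the sketch below (an assumption, not the actual implementation):

```go
// Assumed shape of RequestType.IsAggregation(), inferred from the removed
// getValidationOptions: time-series and scalar requests aggregate; raw,
// raw-stream and trace requests do not.
func (r RequestType) IsAggregation() bool {
	switch r {
	case RequestTypeTimeSeries, RequestTypeScalar:
		return true
	default: // RequestTypeRaw, RequestTypeRawStream, RequestTypeTrace
		return false
	}
}
```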

View File

@@ -743,7 +743,7 @@ func TestValidateQueryEnvelope(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := validateQueryEnvelope(tt.envelope)
err := validateQueryEnvelope(tt.envelope, tt.requestType)
if tt.wantErr {
if err == nil {
t.Errorf("validateQueryEnvelope() expected error but got none")
@@ -816,7 +816,7 @@ func TestQueryEnvelope_Helpers(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.envelope.GetQueryName()
got := tt.envelope.queryName()
if got != tt.want {
t.Errorf("queryName() = %q, want %q", got, tt.want)
}
@@ -868,7 +868,7 @@ func TestQueryEnvelope_Helpers(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.envelope.IsDisabled()
got := tt.envelope.isDisabled()
if got != tt.want {
t.Errorf("isDisabled() = %v, want %v", got, tt.want)
}
@@ -1107,7 +1107,7 @@ func TestQueryRangeRequest_ValidateOrderByForAggregation(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.query.Validate(getValidationOptions(RequestTypeTimeSeries)...)
err := tt.query.Validate(RequestTypeTimeSeries)
if tt.wantErr {
if err == nil {
t.Errorf("validateOrderByForAggregation() expected error but got none")
@@ -1161,7 +1161,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
}
err := query.Validate(getValidationOptions(RequestTypeRaw)...)
err := query.Validate(RequestTypeRaw)
if err != nil {
t.Errorf("expected no error for groupBy with raw request type, got: %v", err)
}
@@ -1178,7 +1178,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: ""}},
},
}
err := query.Validate(getValidationOptions(RequestTypeTimeSeries)...)
err := query.Validate(RequestTypeTimeSeries)
if err == nil {
t.Errorf("expected error for empty groupBy key with timeseries request type")
}
@@ -1190,7 +1190,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
Signal: telemetrytypes.SignalLogs,
Having: &Having{Expression: "count() > 10"},
}
err := query.Validate(getValidationOptions(RequestTypeRaw)...)
err := query.Validate(RequestTypeRaw)
if err != nil {
t.Errorf("expected no error for having with raw request type, got: %v", err)
}
@@ -1202,7 +1202,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
Signal: telemetrytypes.SignalTraces,
Having: &Having{Expression: "count() > 10"},
}
err := query.Validate(getValidationOptions(RequestTypeTrace)...)
err := query.Validate(RequestTypeTrace)
if err != nil {
t.Errorf("expected no error for having with trace request type, got: %v", err)
}
@@ -1216,7 +1216,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
{Expression: "count()"},
},
}
err := query.Validate(getValidationOptions(RequestTypeRaw)...)
err := query.Validate(RequestTypeRaw)
if err != nil {
t.Errorf("expected no error for aggregations with raw request type, got: %v", err)
}
@@ -1230,7 +1230,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
{Expression: "count()"},
},
}
err := query.Validate(getValidationOptions(RequestTypeRawStream)...)
err := query.Validate(RequestTypeRawStream)
if err != nil {
t.Errorf("expected no error for aggregations with raw_stream request type, got: %v", err)
}
@@ -1248,12 +1248,12 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
},
}
// Should error for raw (selectFields are validated)
err := query.Validate(getValidationOptions(RequestTypeRaw)...)
err := query.Validate(RequestTypeRaw)
if err == nil {
t.Errorf("expected error for isRoot in selectFields with raw request type")
}
// Should pass for timeseries (selectFields skipped)
err = query.Validate(getValidationOptions(RequestTypeTimeSeries)...)
err = query.Validate(RequestTypeTimeSeries)
if err != nil {
t.Errorf("expected no error for isRoot in selectFields with timeseries request type, got: %v", err)
}

View File

@@ -1,8 +1,10 @@
package zeustypes
import (
"fmt"
"net/url"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/tidwall/gjson"
)
@@ -35,6 +37,27 @@ type Host struct {
URL string `json:"url" required:"true"`
}
// GettableDeployment represents the parsed deployment info from zeus.GetDeployment.
type GettableDeployment struct {
Name string
SignozAPIUrl string
}
// NewGettableDeployment parses raw GetDeployment bytes into a GettableDeployment.
func NewGettableDeployment(data []byte) (*GettableDeployment, error) {
parsed := gjson.ParseBytes(data)
name := parsed.Get("name").String()
dns := parsed.Get("cluster.region.dns").String()
if name == "" || dns == "" {
return nil, errors.NewInternalf(errors.CodeInternal,
"deployment info response missing name or cluster region dns")
}
return &GettableDeployment{
Name: name,
SignozAPIUrl: fmt.Sprintf("https://%s.%s", name, dns),
}, nil
}
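
A usage sketch for the new parser; the payload is illustrative, but the field paths match the gjson lookups above:

```go
// Only "name" and "cluster.region.dns" are read from the payload.
raw := []byte(`{"name": "acme", "cluster": {"region": {"dns": "us.signoz.cloud"}}}`)
dep, err := zeustypes.NewGettableDeployment(raw)
if err != nil {
	// returned when name or cluster.region.dns is missing
	panic(err)
}
fmt.Println(dep.Name)         // acme
fmt.Println(dep.SignozAPIUrl) // https://acme.us.signoz.cloud
```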
func NewGettableHost(data []byte) *GettableHost {
parsed := gjson.ParseBytes(data)
dns := parsed.Get("cluster.region.dns").String()

View File

@@ -1,634 +0,0 @@
import csv
import io
import json
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from urllib.parse import urlencode
import requests
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.logs import Logs
def test_export_logs_csv(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert 3 logs with different severity levels and attributes.
Tests:
1. Export logs as CSV format
2. Verify CSV structure and content
3. Validate headers are present
4. Check log data is correctly formatted
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_logs(
[
Logs(
timestamp=now - timedelta(seconds=10),
body="Application started successfully",
severity_text="INFO",
resources={
"service.name": "api-service",
"deployment.environment": "production",
"host.name": "server-01",
},
attributes={
"http.method": "GET",
"http.status_code": 200,
"user.id": "user123",
},
),
Logs(
timestamp=now - timedelta(seconds=8),
body="Connection to database failed",
severity_text="ERROR",
resources={
"service.name": "api-service",
"deployment.environment": "production",
"host.name": "server-01",
},
attributes={
"error.type": "ConnectionError",
"db.name": "production_db",
},
),
Logs(
timestamp=now - timedelta(seconds=5),
body="Request processed",
severity_text="DEBUG",
resources={
"service.name": "worker-service",
"deployment.environment": "production",
"host.name": "server-02",
},
attributes={
"request.id": "req-456",
"duration_ms": 150.5,
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
}
# Export logs as CSV (default format, no source needed)
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=30,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "text/csv"
assert "attachment" in response.headers.get("Content-Disposition", "")
# Parse CSV content
csv_content = response.text
csv_reader = csv.DictReader(io.StringIO(csv_content))
rows = list(csv_reader)
assert len(rows) == 3, f"Expected 3 rows, got {len(rows)}"
# Verify log bodies are present in the exported data
bodies = [row.get("body") for row in rows]
assert "Application started successfully" in bodies
assert "Connection to database failed" in bodies
assert "Request processed" in bodies
# Verify severity levels
severities = [row.get("severity_text") for row in rows]
assert "INFO" in severities
assert "ERROR" in severities
assert "DEBUG" in severities
def test_export_logs_jsonl(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert 2 logs with different attributes.
Tests:
1. Export logs as JSONL format
2. Verify JSONL structure and content
3. Check each line is valid JSON
4. Validate log data is correctly formatted
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_logs(
[
Logs(
timestamp=now - timedelta(seconds=10),
body="User logged in",
severity_text="INFO",
resources={
"service.name": "auth-service",
"deployment.environment": "staging",
},
attributes={
"user.email": "test@example.com",
"session.id": "sess-789",
},
),
Logs(
timestamp=now - timedelta(seconds=5),
body="Payment processed successfully",
severity_text="INFO",
resources={
"service.name": "payment-service",
"deployment.environment": "staging",
},
attributes={
"transaction.id": "txn-123",
"amount": 99.99,
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "jsonl",
"source": "logs",
}
# Export logs as JSONL
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
assert "attachment" in response.headers.get("Content-Disposition", "")
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 2, f"Expected 2 lines, got {len(jsonl_lines)}"
# Verify each line is valid JSON
json_objects = []
for line in jsonl_lines:
obj = json.loads(line)
json_objects.append(obj)
assert "id" in obj
assert "timestamp" in obj
assert "body" in obj
assert "severity_text" in obj
# Verify log bodies
bodies = [obj.get("body") for obj in json_objects]
assert "User logged in" in bodies
assert "Payment processed successfully" in bodies
def test_export_logs_with_filter(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert logs with different severity levels.
Tests:
1. Export logs with filter applied
2. Verify only filtered logs are returned
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_logs(
[
Logs(
timestamp=now - timedelta(seconds=10),
body="Info message",
severity_text="INFO",
resources={
"service.name": "test-service",
},
attributes={},
),
Logs(
timestamp=now - timedelta(seconds=8),
body="Error message",
severity_text="ERROR",
resources={
"service.name": "test-service",
},
attributes={},
),
Logs(
timestamp=now - timedelta(seconds=5),
body="Another error message",
severity_text="ERROR",
resources={
"service.name": "test-service",
},
attributes={},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "jsonl",
"source": "logs",
"filter": "severity_text = 'ERROR'",
}
# Export logs with filter
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 2, f"Expected 2 lines (filtered), got {len(jsonl_lines)}"
# Verify only ERROR logs are returned
for line in jsonl_lines:
obj = json.loads(line)
assert obj["severity_text"] == "ERROR"
assert "error message" in obj["body"].lower()
def test_export_logs_with_limit(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert 5 logs.
Tests:
1. Export logs with limit applied
2. Verify only limited number of logs are returned
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
logs = []
for i in range(5):
logs.append(
Logs(
timestamp=now - timedelta(seconds=i),
body=f"Log message {i}",
severity_text="INFO",
resources={
"service.name": "test-service",
},
attributes={
"index": i,
},
)
)
insert_logs(logs)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "csv",
"source": "logs",
"limit": 3,
}
# Export logs with limit
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "text/csv"
# Parse CSV content
csv_content = response.text
csv_reader = csv.DictReader(io.StringIO(csv_content))
rows = list(csv_reader)
assert len(rows) == 3, f"Expected 3 rows (limited), got {len(rows)}"
def test_export_logs_with_columns(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert logs with various attributes.
Tests:
1. Export logs with specific columns
2. Verify only specified columns are returned
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_logs(
[
Logs(
timestamp=now - timedelta(seconds=10),
body="Test log message",
severity_text="INFO",
resources={
"service.name": "test-service",
"deployment.environment": "production",
},
attributes={
"http.method": "GET",
"http.status_code": 200,
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
# Request only specific columns
params = {
"start": start_ns,
"end": end_ns,
"format": "csv",
"source": "logs",
"columns": ["timestamp", "severity_text", "body"],
}
# Export logs with specific columns
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params, doseq=True)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "text/csv"
# Parse CSV content
csv_content = response.text
csv_reader = csv.DictReader(io.StringIO(csv_content))
rows = list(csv_reader)
assert len(rows) == 1
# Verify the specified columns are present
row = rows[0]
assert "timestamp" in row
assert "severity_text" in row
assert "body" in row
assert row["severity_text"] == "INFO"
assert row["body"] == "Test log message"
def test_export_logs_with_order_by(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert logs at different timestamps.
Tests:
1. Export logs with ascending timestamp order
2. Verify logs are returned in correct order
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_logs(
[
Logs(
timestamp=now - timedelta(seconds=10),
body="First log",
severity_text="INFO",
resources={
"service.name": "test-service",
},
attributes={},
),
Logs(
timestamp=now - timedelta(seconds=5),
body="Second log",
severity_text="INFO",
resources={
"service.name": "test-service",
},
attributes={},
),
Logs(
timestamp=now - timedelta(seconds=1),
body="Third log",
severity_text="INFO",
resources={
"service.name": "test-service",
},
attributes={},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "jsonl",
"source": "logs",
"order_by": "timestamp:asc",
}
# Export logs with ascending order
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 3
# Verify order - first log should be "First log" (oldest)
json_objects = [json.loads(line) for line in jsonl_lines]
assert json_objects[0]["body"] == "First log"
assert json_objects[1]["body"] == "Second log"
assert json_objects[2]["body"] == "Third log"
def test_export_logs_with_complex_filter(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[List[Logs]], None],
) -> None:
"""
Setup:
Insert logs with various service names and severity levels.
Tests:
1. Export logs with complex filter (multiple conditions)
2. Verify only logs matching all conditions are returned
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_logs(
[
Logs(
timestamp=now - timedelta(seconds=10),
body="API error occurred",
severity_text="ERROR",
resources={
"service.name": "api-service",
},
attributes={},
),
Logs(
timestamp=now - timedelta(seconds=8),
body="Worker info message",
severity_text="INFO",
resources={
"service.name": "worker-service",
},
attributes={},
),
Logs(
timestamp=now - timedelta(seconds=5),
body="API info message",
severity_text="INFO",
resources={
"service.name": "api-service",
},
attributes={},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
# Filter for api-service AND ERROR severity
params = {
"start": start_ns,
"end": end_ns,
"format": "jsonl",
"source": "logs",
"filter": "service.name = 'api-service' AND severity_text = 'ERROR'",
}
# Export logs with complex filter
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert (
len(jsonl_lines) == 1
), f"Expected 1 line (complex filter), got {len(jsonl_lines)}"
# Verify the filtered log
filtered_obj = json.loads(jsonl_lines[0])
assert filtered_obj["body"] == "API error occurred"
assert filtered_obj["severity_text"] == "ERROR"
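
These deleted tests double as the only documentation of the GET contract for /api/v1/export_raw_data: start/end in unix nanoseconds, format ("csv" or "jsonl"), source, filter, limit, order_by, columns, and Bearer authentication. A minimal Go client sketch under those observations; the host, token and timestamps are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Parameters as exercised by the deleted tests above: start/end are unix
	// nanoseconds, format is "csv" or "jsonl", filter uses the expression
	// language seen in the tests (e.g. severity_text = 'ERROR').
	q := url.Values{}
	q.Set("start", "1700000000000000000") // placeholder timestamps
	q.Set("end", "1700000300000000000")
	q.Set("format", "jsonl")
	q.Set("source", "logs")
	q.Set("filter", "severity_text = 'ERROR'")
	q.Set("limit", "100")

	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:8080/api/v1/export_raw_data?"+q.Encode(), nil) // placeholder host
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <token>") // placeholder token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Header.Get("Content-Type")) // application/x-ndjson for jsonl
	fmt.Println(string(body))
}
```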

View File

@@ -1,782 +0,0 @@
import csv
import io
import json
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from urllib.parse import urlencode
import requests
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
def test_export_traces_csv(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Setup:
Insert 3 traces with different attributes.
Tests:
1. Export traces as CSV format
2. Verify CSV structure and content
3. Validate headers are present
4. Check trace data is correctly formatted
"""
http_service_trace_id = TraceIdGenerator.trace_id()
http_service_span_id = TraceIdGenerator.span_id()
http_service_db_span_id = TraceIdGenerator.span_id()
topic_service_trace_id = TraceIdGenerator.trace_id()
topic_service_span_id = TraceIdGenerator.span_id()
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_traces(
[
Traces(
timestamp=now - timedelta(seconds=4),
duration=timedelta(seconds=3),
trace_id=http_service_trace_id,
span_id=http_service_span_id,
parent_span_id="",
name="POST /integration",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"deployment.environment": "production",
"service.name": "http-service",
"os.type": "linux",
"host.name": "linux-000",
},
attributes={
"net.transport": "IP.TCP",
"http.scheme": "http",
"http.user_agent": "Integration Test",
"http.request.method": "POST",
"http.response.status_code": "200",
},
),
Traces(
timestamp=now - timedelta(seconds=3.5),
duration=timedelta(seconds=0.5),
trace_id=http_service_trace_id,
span_id=http_service_db_span_id,
parent_span_id=http_service_span_id,
name="SELECT",
kind=TracesKind.SPAN_KIND_CLIENT,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"deployment.environment": "production",
"service.name": "http-service",
"os.type": "linux",
"host.name": "linux-000",
},
attributes={
"db.name": "integration",
"db.operation": "SELECT",
"db.statement": "SELECT * FROM integration",
},
),
Traces(
timestamp=now - timedelta(seconds=1),
duration=timedelta(seconds=2),
trace_id=topic_service_trace_id,
span_id=topic_service_span_id,
parent_span_id="",
name="topic publish",
kind=TracesKind.SPAN_KIND_PRODUCER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"deployment.environment": "production",
"service.name": "topic-service",
"os.type": "linux",
"host.name": "linux-001",
},
attributes={
"message.type": "SENT",
"messaging.operation": "publish",
"messaging.message.id": "001",
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"source": "traces",
"limit": 1000,
}
# Export traces as CSV (GET for simple queries)
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=30,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "text/csv"
assert "attachment" in response.headers.get("Content-Disposition", "")
# Parse CSV content
csv_content = response.text
csv_reader = csv.DictReader(io.StringIO(csv_content))
rows = list(csv_reader)
assert len(rows) == 3, f"Expected 3 rows, got {len(rows)}"
# Verify trace IDs are present in the exported data
trace_ids = [row.get("trace_id") for row in rows]
assert http_service_trace_id in trace_ids
assert topic_service_trace_id in trace_ids
# Verify span names are present
span_names = [row.get("name") for row in rows]
assert "POST /integration" in span_names
assert "SELECT" in span_names
assert "topic publish" in span_names
def test_export_traces_jsonl(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Setup:
Insert 2 traces with different attributes.
Tests:
1. Export traces as JSONL format
2. Verify JSONL structure and content
3. Check each line is valid JSON
4. Validate trace data is correctly formatted
"""
http_service_trace_id = TraceIdGenerator.trace_id()
http_service_span_id = TraceIdGenerator.span_id()
topic_service_trace_id = TraceIdGenerator.trace_id()
topic_service_span_id = TraceIdGenerator.span_id()
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_traces(
[
Traces(
timestamp=now - timedelta(seconds=4),
duration=timedelta(seconds=3),
trace_id=http_service_trace_id,
span_id=http_service_span_id,
parent_span_id="",
name="POST /api/test",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "api-service",
"deployment.environment": "staging",
},
attributes={
"http.request.method": "POST",
"http.response.status_code": "201",
},
),
Traces(
timestamp=now - timedelta(seconds=2),
duration=timedelta(seconds=1),
trace_id=topic_service_trace_id,
span_id=topic_service_span_id,
parent_span_id="",
name="queue.process",
kind=TracesKind.SPAN_KIND_CONSUMER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "queue-service",
"deployment.environment": "staging",
},
attributes={
"messaging.operation": "process",
"messaging.system": "rabbitmq",
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "jsonl",
"source": "traces",
"limit": 1000,
}
# Export traces as JSONL (GET for simple queries)
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
assert "attachment" in response.headers.get("Content-Disposition", "")
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 2, f"Expected 2 lines, got {len(jsonl_lines)}"
# Verify each line is valid JSON
json_objects = []
for line in jsonl_lines:
obj = json.loads(line)
json_objects.append(obj)
assert "trace_id" in obj
assert "span_id" in obj
assert "name" in obj
# Verify trace IDs are present
trace_ids = [obj.get("trace_id") for obj in json_objects]
assert http_service_trace_id in trace_ids
assert topic_service_trace_id in trace_ids
# Verify span names are present
span_names = [obj.get("name") for obj in json_objects]
assert "POST /api/test" in span_names
assert "queue.process" in span_names
def test_export_traces_with_filter(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Setup:
Insert traces with different service names.
Tests:
1. Export traces with filter applied
2. Verify only filtered traces are returned
"""
service_a_trace_id = TraceIdGenerator.trace_id()
service_a_span_id = TraceIdGenerator.span_id()
service_b_trace_id = TraceIdGenerator.trace_id()
service_b_span_id = TraceIdGenerator.span_id()
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_traces(
[
Traces(
timestamp=now - timedelta(seconds=4),
duration=timedelta(seconds=1),
trace_id=service_a_trace_id,
span_id=service_a_span_id,
parent_span_id="",
name="operation-a",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "service-a",
},
attributes={},
),
Traces(
timestamp=now - timedelta(seconds=2),
duration=timedelta(seconds=1),
trace_id=service_b_trace_id,
span_id=service_b_span_id,
parent_span_id="",
name="operation-b",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "service-b",
},
attributes={},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "jsonl",
"source": "traces",
"limit": 1000,
"filter": "service.name = 'service-a'",
}
# Export traces with filter (GET supports filter param)
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 1, f"Expected 1 line (filtered), got {len(jsonl_lines)}"
# Verify the filtered trace
filtered_obj = json.loads(jsonl_lines[0])
assert filtered_obj["trace_id"] == service_a_trace_id
assert filtered_obj["name"] == "operation-a"
def test_export_traces_with_limit(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Setup:
Insert 5 traces.
Tests:
1. Export traces with limit applied
2. Verify only limited number of traces are returned
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
traces = []
for i in range(5):
traces.append(
Traces(
timestamp=now - timedelta(seconds=i),
duration=timedelta(seconds=1),
trace_id=TraceIdGenerator.trace_id(),
span_id=TraceIdGenerator.span_id(),
parent_span_id="",
name=f"operation-{i}",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "test-service",
},
attributes={},
)
)
insert_traces(traces)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
params = {
"start": start_ns,
"end": end_ns,
"format": "csv",
"source": "traces",
"limit": 3,
}
# Export traces with limit (GET supports limit param)
response = requests.get(
signoz.self.host_configs["8080"].get(
f"/api/v1/export_raw_data?{urlencode(params)}"
),
timeout=10,
headers={
"authorization": f"Bearer {token}",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "text/csv"
# Parse CSV content
csv_content = response.text
csv_reader = csv.DictReader(io.StringIO(csv_content))
rows = list(csv_reader)
assert len(rows) == 3, f"Expected 3 rows (limited), got {len(rows)}"
def test_export_traces_multiple_queries_rejected(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
) -> None:
"""
Tests:
1. POST with multiple builder queries but no trace operator is rejected
2. Verify 400 error is returned
"""
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
body = {
"start": start_ns,
"end": end_ns,
"compositeQuery": {
"queries": [
{
"type": "builder_query",
"spec": {
"signal": "traces",
"name": "A",
"limit": 1000,
"filter": {"expression": "service.name = 'service-a'"},
},
},
{
"type": "builder_query",
"spec": {
"signal": "traces",
"name": "B",
"limit": 1000,
"filter": {"expression": "service.name = 'service-b'"},
},
},
]
},
}
url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
response = requests.post(
url,
json=body,
timeout=10,
headers={
"authorization": f"Bearer {token}",
"Content-Type": "application/json",
},
)
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_export_traces_with_composite_query_trace_operator(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Setup:
Insert multiple traces with parent-child relationships.
Tests:
1. Export traces using trace operator in composite query (POST)
2. Verify trace operator query works correctly
"""
parent_trace_id = TraceIdGenerator.trace_id()
parent_span_id = TraceIdGenerator.span_id()
child_span_id_1 = TraceIdGenerator.span_id()
child_span_id_2 = TraceIdGenerator.span_id()
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_traces(
[
Traces(
timestamp=now - timedelta(seconds=10),
duration=timedelta(seconds=5),
trace_id=parent_trace_id,
span_id=parent_span_id,
parent_span_id="",
name="parent-operation",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "parent-service",
},
attributes={
"operation.type": "parent",
},
),
Traces(
timestamp=now - timedelta(seconds=9),
duration=timedelta(seconds=2),
trace_id=parent_trace_id,
span_id=child_span_id_1,
parent_span_id=parent_span_id,
name="child-operation-1",
kind=TracesKind.SPAN_KIND_INTERNAL,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "parent-service",
},
attributes={
"operation.type": "child",
},
),
Traces(
timestamp=now - timedelta(seconds=7),
duration=timedelta(seconds=1),
trace_id=parent_trace_id,
span_id=child_span_id_2,
parent_span_id=parent_span_id,
name="child-operation-2",
kind=TracesKind.SPAN_KIND_INTERNAL,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "parent-service",
},
attributes={
"operation.type": "child",
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
# A: spans with operation.type = 'parent'
query_a = {
"type": "builder_query",
"spec": {
"signal": "traces",
"name": "A",
"limit": 1000,
"filter": {"expression": "operation.type = 'parent'"},
},
}
# B: spans with operation.type = 'child'
query_b = {
"type": "builder_query",
"spec": {
"signal": "traces",
"name": "B",
"limit": 1000,
"filter": {"expression": "operation.type = 'child'"},
},
}
# Trace operator: find traces where A has a direct descendant B
query_c = {
"type": "builder_trace_operator",
"spec": {
"name": "C",
"expression": "A => B",
"returnSpansFrom": "A",
"limit": 1000,
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
}
body = {
"start": start_ns,
"end": end_ns,
"requestType": "raw",
"compositeQuery": {
"queries": [query_a, query_b, query_c],
},
}
url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
response = requests.post(
url,
json=body,
timeout=10,
headers={
"authorization": f"Bearer {token}",
"Content-Type": "application/json",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 1, f"Expected 1 line, got {len(jsonl_lines)}"
# Verify all returned spans belong to the matched trace
json_objects = [json.loads(line) for line in jsonl_lines]
trace_ids = [obj.get("trace_id") for obj in json_objects]
assert all(tid == parent_trace_id for tid in trace_ids)
# Verify the parent span (returnSpansFrom = "A") is present
span_names = [obj.get("name") for obj in json_objects]
assert "parent-operation" in span_names
def test_export_traces_with_select_fields(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Setup:
Insert traces with various attributes.
Tests:
1. Export traces with specific select fields via POST
2. Verify only specified fields are returned in the output
"""
trace_id = TraceIdGenerator.trace_id()
span_id = TraceIdGenerator.span_id()
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
insert_traces(
[
Traces(
timestamp=now - timedelta(seconds=10),
duration=timedelta(seconds=2),
trace_id=trace_id,
span_id=span_id,
parent_span_id="",
name="test-operation",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources={
"service.name": "test-service",
"deployment.environment": "production",
"host.name": "server-01",
},
attributes={
"http.method": "POST",
"http.status_code": "201",
"user.id": "user123",
},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Calculate timestamps in nanoseconds
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
end_ns = int(now.timestamp() * 1e9)
body = {
"start": start_ns,
"end": end_ns,
"requestType": "raw",
"compositeQuery": {
"queries": [
{
"type": "builder_query",
"spec": {
"signal": "traces",
"name": "A",
"limit": 1000,
"selectFields": [
{
"name": "trace_id",
"fieldDataType": "string",
"fieldContext": "span",
"signal": "traces",
},
{
"name": "span_id",
"fieldDataType": "string",
"fieldContext": "span",
"signal": "traces",
},
{
"name": "name",
"fieldDataType": "string",
"fieldContext": "span",
"signal": "traces",
},
{
"name": "service.name",
"fieldDataType": "string",
"fieldContext": "resource",
"signal": "traces",
},
],
},
}
]
},
}
url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
response = requests.post(
url,
json=body,
timeout=10,
headers={
"authorization": f"Bearer {token}",
"Content-Type": "application/json",
},
)
assert response.status_code == HTTPStatus.OK
assert response.headers["Content-Type"] == "application/x-ndjson"
# Parse JSONL content
jsonl_lines = response.text.strip().split("\n")
assert len(jsonl_lines) == 1
# Verify the selected fields are present
result = json.loads(jsonl_lines[0])
assert "trace_id" in result
assert "span_id" in result
assert "name" in result
# Verify values
assert result["trace_id"] == trace_id
assert result["span_id"] == span_id
assert result["name"] == "test-operation"