Compare commits

..

37 Commits

Author SHA1 Message Date
Naman Verma  69375be52e  Merge branch 'main' into nv/6204  2026-03-24 09:27:59 +05:30
Naman Verma  4d649a8e0c  chore: go lint fix  2026-03-19 13:32:50 +05:30
Naman Verma  d1c6a3040b  fix: consequences of load testing  2026-03-19 13:27:47 +05:30
Naman Verma  36e8d2e6c6  Merge branch 'main' into nv/6204  2026-03-18 20:05:03 +05:30
Naman Verma  1e4191c7bf  test: integration test for gauge metrics  2026-03-18 12:56:09 +05:30
Naman Verma  edffb359c1  fix: exact check on metric order by key instead of contains check on metric name  2026-03-18 12:14:45 +05:30
Naman Verma  22cc7509fc  chore: fix lint issues  2026-03-18 12:14:16 +05:30
Naman Verma  dee8e53b6b  test: fix assertion failure messages  2026-03-18 11:57:12 +05:30
Naman Verma  285a31afa6  chore: remove unused var  2026-03-18 11:46:26 +05:30
Naman Verma  8cfa9dc038  chore: remove unused function  2026-03-18 11:40:34 +05:30
Naman Verma  b986b0a5f4  Merge branch 'main' into nv/6204  2026-03-18 11:32:09 +05:30
Naman Verma  28e0f2f7ad  test: unit tests update  2026-03-13 22:16:47 +05:30
Naman Verma  cd458f0205  test: unit tests update  2026-03-13 21:53:20 +05:30
Naman Verma  ee2916e6c6  test: integration tests for histogram with many groups  2026-03-13 21:23:26 +05:30
Naman Verma  1c1d069263  test: integration tests for order by sum (part 2)  2026-03-12 19:49:00 +05:30
Naman Verma  7dc46db2e3  test: split count and p90 group by into 2 tests  2026-03-12 19:39:51 +05:30
Naman Verma  bfcd423a45  chore: remove logger used for debugging  2026-03-12 19:32:06 +05:30
Naman Verma  323b1163e5  test: integration tests for order by sum (part 1)  2026-03-12 19:31:07 +05:30
Naman Verma  673379a46c  chore: separate CTE for histogram to be able to apply where clause for limit  2026-03-12 18:56:49 +05:30
Naman Verma  37f490c705  chore: lint issues  2026-03-12 14:35:27 +05:30
Naman Verma  324e34092e  chore: rename vars  2026-03-12 12:20:49 +05:30
Naman Verma  2a2c365950  test: integration tests  2026-03-12 12:20:16 +05:30
Naman Verma  14065d39a6  Merge branch 'main' into nv/6204  2026-03-12 01:29:17 +05:30
Naman Verma  bf2133f1ab  test: fix meter unit tests  2026-03-10 22:53:39 +05:30
Naman Verma  d7d907f687  test: max parametrising of the unit tests  2026-03-10 16:31:25 +05:30
Naman Verma  76b4549504  test: unit tests  2026-03-10 16:16:53 +05:30
Naman Verma  968a5089ff  fix: check for tic when finding remaining keys  2026-03-10 16:16:29 +05:30
Naman Verma  c082bc3d76  chore: also sort by remaining group by keys  2026-03-10 15:43:45 +05:30
Naman Verma  59e0dcc865  chore: move order by ts asc out of all if branches  2026-03-10 15:40:26 +05:30
Naman Verma  89840189ef  chore: remove comment  2026-03-10 15:16:09 +05:30
Naman Verma  b64a07db02  fix: limit in where subclause was coming as ?  2026-03-10 15:15:46 +05:30
Naman Verma  38d971b3c9  fix: add partition window when ordering by sum  2026-03-10 15:06:29 +05:30
Naman Verma  f8b266ce05  chore: first draft before testing  2026-03-10 14:55:38 +05:30
Naman Verma  20f7562cbc  revert: wrong changes in stmt builder (vv wrong)  2026-03-10 13:36:33 +05:30
Naman Verma  29713964ce  Merge branch 'main' into nv/6204  2026-03-10 13:33:26 +05:30
Naman Verma  afb252b4f9  Merge branch 'main' into nv/6204  2026-03-06 08:26:09 +05:30
Naman Verma  c808b4d759  fix: consume order by and limit for metrics in clickhouse query  2026-03-05 09:29:23 +05:30
61 changed files with 4459 additions and 6223 deletions
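Since the query-builder diffs themselves are suppressed below as too large, here is a hedged Go sketch of the query shape the commit messages above describe: ordering metric series by their summed value and restricting the result to the top N groups through a separate CTE, so the limit can be consumed as a WHERE clause. All table, column, and metric names in the SQL are hypothetical, not the actual SigNoz schema.

package main

import "fmt"

// Illustrative only: a rough shape of the ClickHouse SQL described by the
// commits above -- order metric series by their summed value and restrict the
// result to the top N groups via a separate CTE, so the group limit can be
// applied as a WHERE clause in the outer query. Names are made up.
const orderBySumSketch = `
WITH top_groups AS (
    SELECT service_name, sum(value) AS total
    FROM samples
    WHERE metric_name = 'http_requests_total'
    GROUP BY service_name
    ORDER BY total DESC
    LIMIT 10
)
SELECT ts, service_name, sum(value) AS value
FROM samples
WHERE metric_name = 'http_requests_total'
  AND service_name IN (SELECT service_name FROM top_groups)
GROUP BY ts, service_name
ORDER BY ts ASC, value DESC`

func main() {
	fmt.Println(orderBySumSketch)
}

The commit about adding a partition window when ordering by sum suggests the per-group total may instead be computed with a window function over the group-by keys; that variant is not shown here.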

View File

@@ -144,8 +144,6 @@ telemetrystore:
##################### Prometheus #####################
prometheus:
# The maximum time a PromQL query is allowed to run before being aborted.
timeout: 2m
active_query_tracker:
# Whether to enable the active query tracker.
enabled: true

File diff suppressed because it is too large

View File

@@ -257,7 +257,7 @@ func TestManager_TestNotification_SendUnmatched_PromRule(t *testing.T) {
WillReturnRows(samplesRows)
// Create Prometheus provider for this test
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, store)
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, store)
},
ManagerOptionsHook: func(opts *rules.ManagerOptions) {
// Set Prometheus provider for PromQL queries
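The config and test changes above drop the explicit PromQL timeout ("timeout: 2m" in the YAML and Timeout in prometheus.Config). A minimal sketch of one way such a removal typically works, assuming the provider falls back to a default when the configured timeout is zero; the actual default used by this PR is not visible in the suppressed diffs, so the 2m value below is an assumption.

package main

import (
	"fmt"
	"time"
)

// Hypothetical stand-in for a config whose Timeout may be left unset.
type Config struct {
	Timeout time.Duration
}

// effectiveTimeout returns the configured timeout, or a default when the
// field is zero. The 2-minute default is assumed, not taken from the PR.
func effectiveTimeout(cfg Config) time.Duration {
	if cfg.Timeout > 0 {
		return cfg.Timeout
	}
	return 2 * time.Minute
}

func main() {
	fmt.Println(effectiveTimeout(Config{}))                     // 2m0s (default)
	fmt.Println(effectiveTimeout(Config{Timeout: time.Minute})) // 1m0s
}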

File diff suppressed because it is too large

View File

@@ -425,39 +425,6 @@ export interface AuthtypesSessionContextDTO {
orgs?: AuthtypesOrgSessionContextDTO[] | null;
}
export interface AuthtypesStorableRoleDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
description?: string;
/**
* @type string
*/
id: string;
/**
* @type string
*/
name?: string;
/**
* @type string
*/
orgId?: string;
/**
* @type string
*/
type?: string;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
}
export interface AuthtypesTransactionDTO {
object: AuthtypesObjectDTO;
/**
@@ -470,504 +437,6 @@ export interface AuthtypesUpdateableAuthDomainDTO {
config?: AuthtypesAuthDomainConfigDTO;
}
export interface AuthtypesUserRoleDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
id: string;
role?: AuthtypesStorableRoleDTO;
/**
* @type string
*/
role_id?: string;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
/**
* @type string
*/
user_id?: string;
}
export interface AuthtypesUserWithRolesDTO {
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
displayName?: string;
/**
* @type string
*/
email?: string;
/**
* @type string
*/
id: string;
/**
* @type boolean
*/
isRoot?: boolean;
/**
* @type string
*/
orgId?: string;
/**
* @type string
*/
status?: string;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
/**
* @type array
* @nullable true
*/
userRoles?: AuthtypesUserRoleDTO[] | null;
}
export interface CloudintegrationtypesAWSAccountConfigDTO {
/**
* @type array
*/
regions: string[];
}
export type CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets = {
[key: string]: string[];
};
export interface CloudintegrationtypesAWSCollectionStrategyDTO {
aws_logs?: CloudintegrationtypesAWSLogsStrategyDTO;
aws_metrics?: CloudintegrationtypesAWSMetricsStrategyDTO;
/**
* @type object
*/
s3_buckets?: CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets;
}
export interface CloudintegrationtypesAWSConnectionArtifactDTO {
/**
* @type string
*/
connectionURL: string;
}
export interface CloudintegrationtypesAWSConnectionArtifactRequestDTO {
/**
* @type string
*/
deploymentRegion: string;
/**
* @type array
*/
regions: string[];
}
export interface CloudintegrationtypesAWSIntegrationConfigDTO {
/**
* @type array
*/
enabledRegions: string[];
telemetry: CloudintegrationtypesAWSCollectionStrategyDTO;
}
export type CloudintegrationtypesAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem = {
/**
* @type string
*/
filter_pattern?: string;
/**
* @type string
*/
log_group_name_prefix?: string;
};
export interface CloudintegrationtypesAWSLogsStrategyDTO {
/**
* @type array
* @nullable true
*/
cloudwatch_logs_subscriptions?:
| CloudintegrationtypesAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem[]
| null;
}
export type CloudintegrationtypesAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem = {
/**
* @type array
*/
MetricNames?: string[];
/**
* @type string
*/
Namespace?: string;
};
export interface CloudintegrationtypesAWSMetricsStrategyDTO {
/**
* @type array
* @nullable true
*/
cloudwatch_metric_stream_filters?:
| CloudintegrationtypesAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem[]
| null;
}
export interface CloudintegrationtypesAWSServiceConfigDTO {
logs?: CloudintegrationtypesAWSServiceLogsConfigDTO;
metrics?: CloudintegrationtypesAWSServiceMetricsConfigDTO;
}
export type CloudintegrationtypesAWSServiceLogsConfigDTOS3Buckets = {
[key: string]: string[];
};
export interface CloudintegrationtypesAWSServiceLogsConfigDTO {
/**
* @type boolean
*/
enabled?: boolean;
/**
* @type object
*/
s3_buckets?: CloudintegrationtypesAWSServiceLogsConfigDTOS3Buckets;
}
export interface CloudintegrationtypesAWSServiceMetricsConfigDTO {
/**
* @type boolean
*/
enabled?: boolean;
}
export interface CloudintegrationtypesAccountDTO {
agentReport: CloudintegrationtypesAgentReportDTO;
config: CloudintegrationtypesAccountConfigDTO;
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
id: string;
/**
* @type string
*/
orgId: string;
/**
* @type string
*/
provider: string;
/**
* @type string
* @nullable true
*/
providerAccountId: string | null;
/**
* @type string
* @format date-time
* @nullable true
*/
removedAt: Date | null;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
}
export interface CloudintegrationtypesAccountConfigDTO {
aws: CloudintegrationtypesAWSAccountConfigDTO;
}
/**
* @nullable
*/
export type CloudintegrationtypesAgentReportDTOData = {
[key: string]: unknown;
} | null;
/**
* @nullable
*/
export type CloudintegrationtypesAgentReportDTO = {
/**
* @type object
* @nullable true
*/
data: CloudintegrationtypesAgentReportDTOData;
/**
* @type integer
* @format int64
*/
timestampMillis: number;
} | null;
export interface CloudintegrationtypesAssetsDTO {
/**
* @type array
* @nullable true
*/
dashboards?: CloudintegrationtypesDashboardDTO[] | null;
}
export interface CloudintegrationtypesCollectedLogAttributeDTO {
/**
* @type string
*/
name?: string;
/**
* @type string
*/
path?: string;
/**
* @type string
*/
type?: string;
}
export interface CloudintegrationtypesCollectedMetricDTO {
/**
* @type string
*/
description?: string;
/**
* @type string
*/
name?: string;
/**
* @type string
*/
type?: string;
/**
* @type string
*/
unit?: string;
}
export interface CloudintegrationtypesCollectionStrategyDTO {
aws: CloudintegrationtypesAWSCollectionStrategyDTO;
}
export interface CloudintegrationtypesConnectionArtifactDTO {
aws: CloudintegrationtypesAWSConnectionArtifactDTO;
}
export interface CloudintegrationtypesConnectionArtifactRequestDTO {
aws: CloudintegrationtypesAWSConnectionArtifactRequestDTO;
}
export interface CloudintegrationtypesDashboardDTO {
definition?: DashboardtypesStorableDashboardDataDTO;
/**
* @type string
*/
description?: string;
/**
* @type string
*/
id?: string;
/**
* @type string
*/
title?: string;
}
export interface CloudintegrationtypesDataCollectedDTO {
/**
* @type array
* @nullable true
*/
logs?: CloudintegrationtypesCollectedLogAttributeDTO[] | null;
/**
* @type array
* @nullable true
*/
metrics?: CloudintegrationtypesCollectedMetricDTO[] | null;
}
export interface CloudintegrationtypesGettableAccountWithArtifactDTO {
connectionArtifact: CloudintegrationtypesConnectionArtifactDTO;
/**
* @type string
*/
id: string;
}
export interface CloudintegrationtypesGettableAccountsDTO {
/**
* @type array
*/
accounts: CloudintegrationtypesAccountDTO[];
}
export interface CloudintegrationtypesGettableAgentCheckInResponseDTO {
/**
* @type string
*/
account_id: string;
/**
* @type string
*/
cloud_account_id: string;
/**
* @type string
*/
cloudIntegrationId: string;
integration_config: CloudintegrationtypesIntegrationConfigDTO;
integrationConfig: CloudintegrationtypesProviderIntegrationConfigDTO;
/**
* @type string
*/
providerAccountId: string;
/**
* @type string
* @format date-time
* @nullable true
*/
removed_at: Date | null;
/**
* @type string
* @format date-time
* @nullable true
*/
removedAt: Date | null;
}
export interface CloudintegrationtypesGettableServicesMetadataDTO {
/**
* @type array
*/
services: CloudintegrationtypesServiceMetadataDTO[];
}
/**
* @nullable
*/
export type CloudintegrationtypesIntegrationConfigDTO = {
/**
* @type array
*/
enabled_regions: string[];
telemetry: CloudintegrationtypesAWSCollectionStrategyDTO;
} | null;
/**
* @nullable
*/
export type CloudintegrationtypesPostableAgentCheckInRequestDTOData = {
[key: string]: unknown;
} | null;
export interface CloudintegrationtypesPostableAgentCheckInRequestDTO {
/**
* @type string
*/
account_id?: string;
/**
* @type string
*/
cloud_account_id?: string;
/**
* @type string
*/
cloudIntegrationId?: string;
/**
* @type object
* @nullable true
*/
data: CloudintegrationtypesPostableAgentCheckInRequestDTOData;
/**
* @type string
*/
providerAccountId?: string;
}
export interface CloudintegrationtypesProviderIntegrationConfigDTO {
aws: CloudintegrationtypesAWSIntegrationConfigDTO;
}
export interface CloudintegrationtypesServiceDTO {
assets: CloudintegrationtypesAssetsDTO;
dataCollected: CloudintegrationtypesDataCollectedDTO;
/**
* @type string
*/
icon: string;
/**
* @type string
*/
id: string;
/**
* @type string
*/
overview: string;
serviceConfig?: CloudintegrationtypesServiceConfigDTO;
supported_signals: CloudintegrationtypesSupportedSignalsDTO;
telemetryCollectionStrategy: CloudintegrationtypesCollectionStrategyDTO;
/**
* @type string
*/
title: string;
}
export interface CloudintegrationtypesServiceConfigDTO {
aws: CloudintegrationtypesAWSServiceConfigDTO;
}
export interface CloudintegrationtypesServiceMetadataDTO {
/**
* @type boolean
*/
enabled: boolean;
/**
* @type string
*/
icon: string;
/**
* @type string
*/
id: string;
/**
* @type string
*/
title: string;
}
export interface CloudintegrationtypesSupportedSignalsDTO {
/**
* @type boolean
*/
logs?: boolean;
/**
* @type boolean
*/
metrics?: boolean;
}
export interface CloudintegrationtypesUpdatableAccountDTO {
config: CloudintegrationtypesAccountConfigDTO;
}
export interface CloudintegrationtypesUpdatableServiceDTO {
config: CloudintegrationtypesServiceConfigDTO;
}
export interface DashboardtypesDashboardDTO {
/**
* @type string
@@ -3180,13 +2649,6 @@ export interface TypesPostableResetPasswordDTO {
token?: string;
}
export interface TypesPostableRoleDTO {
/**
* @type string
*/
name: string;
}
export interface TypesResetPasswordTokenDTO {
/**
* @type string
@@ -3252,13 +2714,6 @@ export interface TypesStorableAPIKeyDTO {
userId?: string;
}
export interface TypesUpdatableUserDTO {
/**
* @type string
*/
displayName: string;
}
export interface TypesUserDTO {
/**
* @type string
@@ -3403,97 +2858,6 @@ export type AuthzResources200 = {
export type ChangePasswordPathParameters = {
id: string;
};
export type AgentCheckInDeprecatedPathParameters = {
cloudProvider: string;
};
export type AgentCheckInDeprecated200 = {
data: CloudintegrationtypesGettableAgentCheckInResponseDTO;
/**
* @type string
*/
status: string;
};
export type ListAccountsPathParameters = {
cloudProvider: string;
};
export type ListAccounts200 = {
data: CloudintegrationtypesGettableAccountsDTO;
/**
* @type string
*/
status: string;
};
export type CreateAccountPathParameters = {
cloudProvider: string;
};
export type CreateAccount200 = {
data: CloudintegrationtypesGettableAccountWithArtifactDTO;
/**
* @type string
*/
status: string;
};
export type DisconnectAccountPathParameters = {
cloudProvider: string;
id: string;
};
export type GetAccountPathParameters = {
cloudProvider: string;
id: string;
};
export type GetAccount200 = {
data: CloudintegrationtypesAccountDTO;
/**
* @type string
*/
status: string;
};
export type UpdateAccountPathParameters = {
cloudProvider: string;
id: string;
};
export type AgentCheckInPathParameters = {
cloudProvider: string;
};
export type AgentCheckIn200 = {
data: CloudintegrationtypesGettableAgentCheckInResponseDTO;
/**
* @type string
*/
status: string;
};
export type ListServicesMetadataPathParameters = {
cloudProvider: string;
};
export type ListServicesMetadata200 = {
data: CloudintegrationtypesGettableServicesMetadataDTO;
/**
* @type string
*/
status: string;
};
export type GetServicePathParameters = {
cloudProvider: string;
serviceId: string;
};
export type GetService200 = {
data: CloudintegrationtypesServiceDTO;
/**
* @type string
*/
status: string;
};
export type UpdateServicePathParameters = {
cloudProvider: string;
serviceId: string;
};
export type CreateSessionByGoogleCallback303 = {
data: AuthtypesGettableTokenDTO;
/**
@@ -3965,7 +3329,7 @@ export type UpdateServiceAccountKeyPathParameters = {
export type UpdateServiceAccountStatusPathParameters = {
id: string;
};
export type ListUsersDeprecated200 = {
export type ListUsers200 = {
/**
* @type array
*/
@@ -3979,10 +3343,10 @@ export type ListUsersDeprecated200 = {
export type DeleteUserPathParameters = {
id: string;
};
export type GetUserDeprecatedPathParameters = {
export type GetUserPathParameters = {
id: string;
};
export type GetUserDeprecated200 = {
export type GetUser200 = {
data: TypesDeprecatedUserDTO;
/**
* @type string
@@ -3990,10 +3354,10 @@ export type GetUserDeprecated200 = {
status: string;
};
export type UpdateUserDeprecatedPathParameters = {
export type UpdateUserPathParameters = {
id: string;
};
export type UpdateUserDeprecated200 = {
export type UpdateUser200 = {
data: TypesDeprecatedUserDTO;
/**
* @type string
@@ -4001,7 +3365,7 @@ export type UpdateUserDeprecated200 = {
status: string;
};
export type GetMyUserDeprecated200 = {
export type GetMyUser200 = {
data: TypesDeprecatedUserDTO;
/**
* @type string
@@ -4298,20 +3662,6 @@ export type Readyz503 = {
status: string;
};
export type GetUsersByRoleIDPathParameters = {
id: string;
};
export type GetUsersByRoleID200 = {
/**
* @type array
*/
data: TypesUserDTO[];
/**
* @type string
*/
status: string;
};
export type GetSessionContext200 = {
data: AuthtypesSessionContextDTO;
/**
@@ -4336,60 +3686,6 @@ export type RotateSession200 = {
status: string;
};
export type ListUsers200 = {
/**
* @type array
*/
data: TypesUserDTO[];
/**
* @type string
*/
status: string;
};
export type GetUserPathParameters = {
id: string;
};
export type GetUser200 = {
data: AuthtypesUserWithRolesDTO;
/**
* @type string
*/
status: string;
};
export type UpdateUserPathParameters = {
id: string;
};
export type GetRolesByUserIDPathParameters = {
id: string;
};
export type GetRolesByUserID200 = {
/**
* @type array
*/
data: AuthtypesRoleDTO[];
/**
* @type string
*/
status: string;
};
export type SetRoleByUserIDPathParameters = {
id: string;
};
export type RemoveUserRoleByUserIDAndRoleIDPathParameters = {
id: string;
roleId: string;
};
export type GetMyUser200 = {
data: AuthtypesUserWithRolesDTO;
/**
* @type string
*/
status: string;
};
export type GetHosts200 = {
data: ZeustypesGettableHostDTO;
/**

File diff suppressed because it is too large

View File

@@ -20,7 +20,7 @@ import { RenderErrorResponseDTO } from 'api/generated/services/sigNoz.schemas';
import {
getResetPasswordToken,
useDeleteUser,
useUpdateUserDeprecated,
useUpdateUser,
} from 'api/generated/services/users';
import { AxiosError } from 'axios';
import { MemberRow } from 'components/MembersTable/MembersTable';
@@ -60,7 +60,7 @@ function EditMemberDrawer({
const isInvited = member?.status === MemberStatus.Invited;
const { mutate: updateUser, isLoading: isSaving } = useUpdateUserDeprecated({
const { mutate: updateUser, isLoading: isSaving } = useUpdateUser({
mutation: {
onSuccess: (): void => {
toast.success('Member details updated successfully', { richColors: true });

View File

@@ -4,7 +4,7 @@ import { convertToApiError } from 'api/ErrorResponseHandlerForGeneratedAPIs';
import {
getResetPasswordToken,
useDeleteUser,
useUpdateUserDeprecated,
useUpdateUser,
} from 'api/generated/services/users';
import { MemberStatus } from 'container/MembersSettings/utils';
import {
@@ -50,7 +50,7 @@ jest.mock('@signozhq/dialog', () => ({
jest.mock('api/generated/services/users', () => ({
useDeleteUser: jest.fn(),
useUpdateUserDeprecated: jest.fn(),
useUpdateUser: jest.fn(),
getResetPasswordToken: jest.fn(),
}));
@@ -105,7 +105,7 @@ function renderDrawer(
describe('EditMemberDrawer', () => {
beforeEach(() => {
jest.clearAllMocks();
(useUpdateUserDeprecated as jest.Mock).mockReturnValue({
(useUpdateUser as jest.Mock).mockReturnValue({
mutate: mockUpdateMutate,
isLoading: false,
});
@@ -130,7 +130,7 @@ describe('EditMemberDrawer', () => {
const onComplete = jest.fn();
const user = userEvent.setup({ pointerEventsCheck: 0 });
(useUpdateUserDeprecated as jest.Mock).mockImplementation((options) => ({
(useUpdateUser as jest.Mock).mockImplementation((options) => ({
mutate: mockUpdateMutate.mockImplementation(() => {
options?.mutation?.onSuccess?.();
}),
@@ -239,7 +239,7 @@ describe('EditMemberDrawer', () => {
const onComplete = jest.fn();
const user = userEvent.setup({ pointerEventsCheck: 0 });
(useUpdateUserDeprecated as jest.Mock).mockImplementation((options) => ({
(useUpdateUser as jest.Mock).mockImplementation((options) => ({
mutate: mockUpdateMutate.mockImplementation(() => {
options?.mutation?.onSuccess?.();
}),
@@ -280,7 +280,7 @@ describe('EditMemberDrawer', () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });
const mockToast = jest.mocked(toast);
(useUpdateUserDeprecated as jest.Mock).mockImplementation((options) => ({
(useUpdateUser as jest.Mock).mockImplementation((options) => ({
mutate: mockUpdateMutate.mockImplementation(() => {
options?.mutation?.onError?.({});
}),

View File

@@ -62,6 +62,9 @@ export const getVolumeQueryPayload = (
const k8sPVCNameKey = dotMetricsEnabled
? 'k8s.persistentvolumeclaim.name'
: 'k8s_persistentvolumeclaim_name';
const legendTemplate = dotMetricsEnabled
? '{{k8s.namespace.name}}-{{k8s.pod.name}}'
: '{{k8s_namespace_name}}-{{k8s_pod_name}}';
return [
{
@@ -133,7 +136,7 @@ export const getVolumeQueryPayload = (
functions: [],
groupBy: [],
having: [],
legend: 'Available',
legend: legendTemplate,
limit: null,
orderBy: [],
queryName: 'A',
@@ -225,7 +228,7 @@ export const getVolumeQueryPayload = (
functions: [],
groupBy: [],
having: [],
legend: 'Capacity',
legend: legendTemplate,
limit: null,
orderBy: [],
queryName: 'A',
@@ -316,7 +319,7 @@ export const getVolumeQueryPayload = (
},
groupBy: [],
having: [],
legend: 'Inodes Used',
legend: legendTemplate,
limit: null,
orderBy: [],
queryName: 'A',
@@ -408,7 +411,7 @@ export const getVolumeQueryPayload = (
},
groupBy: [],
having: [],
legend: 'Total Inodes',
legend: legendTemplate,
limit: null,
orderBy: [],
queryName: 'A',
@@ -500,7 +503,7 @@ export const getVolumeQueryPayload = (
},
groupBy: [],
having: [],
legend: 'Inodes Free',
legend: legendTemplate,
limit: null,
orderBy: [],
queryName: 'A',
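The legend strings in the hunks above switch from static labels ('Available', 'Capacity', and so on) to templates such as '{{k8s.namespace.name}}-{{k8s.pod.name}}'. As a rough sketch of what such templating amounts to (the real renderer lives in the TypeScript frontend and is not shown in this diff), each '{{key}}' placeholder is substituted with the matching series label:

package main

import (
	"fmt"
	"regexp"
)

// placeholder matches {{key}} tokens in a legend template.
var placeholder = regexp.MustCompile(`\{\{(.*?)\}\}`)

// expandLegend replaces each {{key}} with the matching series label value.
// Illustrative stand-in only, not the actual SigNoz implementation.
func expandLegend(template string, labels map[string]string) string {
	return placeholder.ReplaceAllStringFunc(template, func(m string) string {
		key := m[2 : len(m)-2]
		if v, ok := labels[key]; ok {
			return v
		}
		return m // keep the placeholder if the label is missing
	})
}

func main() {
	labels := map[string]string{
		"k8s.namespace.name": "default",
		"k8s.pod.name":       "my-pod-0",
	}
	fmt.Println(expandLegend("{{k8s.namespace.name}}-{{k8s.pod.name}}", labels))
	// prints: default-my-pod-0
}

Keeping unresolved placeholders intact (rather than dropping them) makes it obvious in the chart legend when a series is missing one of the templated labels.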

View File

@@ -1,216 +0,0 @@
package signozapiserver
import (
"net/http"
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/types"
citypes "github.com/SigNoz/signoz/pkg/types/cloudintegrationtypes"
"github.com/gorilla/mux"
)
func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.CreateAccount),
handler.OpenAPIDef{
ID: "CreateAccount",
Tags: []string{"cloudintegration"},
Summary: "Create account",
Description: "This endpoint creates a new cloud integration account for the specified cloud provider",
Request: new(citypes.PostableConnectionArtifact),
RequestContentType: "application/json",
Response: new(citypes.GettableAccountWithArtifact),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodPost).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.ListAccounts),
handler.OpenAPIDef{
ID: "ListAccounts",
Tags: []string{"cloudintegration"},
Summary: "List accounts",
Description: "This endpoint lists the accounts for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: new(citypes.GettableAccounts),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts/{id}", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.GetAccount),
handler.OpenAPIDef{
ID: "GetAccount",
Tags: []string{"cloudintegration"},
Summary: "Get account",
Description: "This endpoint gets an account for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: new(citypes.GettableAccount),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts/{id}", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.UpdateAccount),
handler.OpenAPIDef{
ID: "UpdateAccount",
Tags: []string{"cloudintegration"},
Summary: "Update account",
Description: "This endpoint updates an account for the specified cloud provider",
Request: new(citypes.UpdatableAccount),
RequestContentType: "application/json",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusNoContent,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodPut).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts/{id}", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.DisconnectAccount),
handler.OpenAPIDef{
ID: "DisconnectAccount",
Tags: []string{"cloudintegration"},
Summary: "Disconnect account",
Description: "This endpoint disconnects an account for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusNoContent,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodDelete).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/services", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.ListServicesMetadata),
handler.OpenAPIDef{
ID: "ListServicesMetadata",
Tags: []string{"cloudintegration"},
Summary: "List services metadata",
Description: "This endpoint lists the services metadata for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: new(citypes.GettableServicesMetadata),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/services/{service_id}", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.GetService),
handler.OpenAPIDef{
ID: "GetService",
Tags: []string{"cloudintegration"},
Summary: "Get service",
Description: "This endpoint gets a service for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: new(citypes.GettableService),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/services/{service_id}", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.UpdateService),
handler.OpenAPIDef{
ID: "UpdateService",
Tags: []string{"cloudintegration"},
Summary: "Update service",
Description: "This endpoint updates a service for the specified cloud provider",
Request: new(citypes.UpdatableService),
RequestContentType: "application/json",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusNoContent,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodPut).GetError(); err != nil {
return err
}
// Agent check-in endpoint is kept same as older one to maintain backward compatibility with already deployed agents.
// In the future, this endpoint will be deprecated and a new endpoint will be introduced for consistency with above endpoints.
if err := router.Handle("/api/v1/cloud-integrations/{cloud_provider}/agent-check-in", handler.New(
provider.authZ.ViewAccess(provider.cloudIntegrationHandler.AgentCheckIn),
handler.OpenAPIDef{
ID: "AgentCheckInDeprecated",
Tags: []string{"cloudintegration"},
Summary: "Agent check-in",
Description: "[Deprecated] This endpoint is called by the deployed agent to check in",
Request: new(citypes.PostableAgentCheckInRequest),
RequestContentType: "application/json",
Response: new(citypes.GettableAgentCheckInResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: true, // this endpoint will be deprecated in future
SecuritySchemes: newSecuritySchemes(types.RoleViewer), // agent role is viewer
},
)).Methods(http.MethodPost).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts/check_in", handler.New(
provider.authZ.ViewAccess(provider.cloudIntegrationHandler.AgentCheckIn),
handler.OpenAPIDef{
ID: "AgentCheckIn",
Tags: []string{"cloudintegration"},
Summary: "Agent check-in",
Description: "This endpoint is called by the deployed agent to check in",
Request: new(citypes.PostableAgentCheckInRequest),
RequestContentType: "application/json",
Response: new(citypes.GettableAgentCheckInResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer), // agent role is viewer
},
)).Methods(http.MethodPost).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/modules/authdomain"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
@@ -31,30 +30,29 @@ import (
)
type provider struct {
config apiserver.Config
settings factory.ScopedProviderSettings
router *mux.Router
authZ *middleware.AuthZ
orgHandler organization.Handler
userHandler user.Handler
sessionHandler session.Handler
authDomainHandler authdomain.Handler
preferenceHandler preference.Handler
globalHandler global.Handler
promoteHandler promote.Handler
flaggerHandler flagger.Handler
dashboardModule dashboard.Module
dashboardHandler dashboard.Handler
metricsExplorerHandler metricsexplorer.Handler
gatewayHandler gateway.Handler
fieldsHandler fields.Handler
authzHandler authz.Handler
rawDataExportHandler rawdataexport.Handler
zeusHandler zeus.Handler
querierHandler querier.Handler
serviceAccountHandler serviceaccount.Handler
factoryHandler factory.Handler
cloudIntegrationHandler cloudintegration.Handler
config apiserver.Config
settings factory.ScopedProviderSettings
router *mux.Router
authZ *middleware.AuthZ
orgHandler organization.Handler
userHandler user.Handler
sessionHandler session.Handler
authDomainHandler authdomain.Handler
preferenceHandler preference.Handler
globalHandler global.Handler
promoteHandler promote.Handler
flaggerHandler flagger.Handler
dashboardModule dashboard.Module
dashboardHandler dashboard.Handler
metricsExplorerHandler metricsexplorer.Handler
gatewayHandler gateway.Handler
fieldsHandler fields.Handler
authzHandler authz.Handler
rawDataExportHandler rawdataexport.Handler
zeusHandler zeus.Handler
querierHandler querier.Handler
serviceAccountHandler serviceaccount.Handler
factoryHandler factory.Handler
}
func NewFactory(
@@ -79,7 +77,6 @@ func NewFactory(
querierHandler querier.Handler,
serviceAccountHandler serviceaccount.Handler,
factoryHandler factory.Handler,
cloudIntegrationHandler cloudintegration.Handler,
) factory.ProviderFactory[apiserver.APIServer, apiserver.Config] {
return factory.NewProviderFactory(factory.MustNewName("signoz"), func(ctx context.Context, providerSettings factory.ProviderSettings, config apiserver.Config) (apiserver.APIServer, error) {
return newProvider(
@@ -107,7 +104,6 @@ func NewFactory(
querierHandler,
serviceAccountHandler,
factoryHandler,
cloudIntegrationHandler,
)
})
}
@@ -137,35 +133,33 @@ func newProvider(
querierHandler querier.Handler,
serviceAccountHandler serviceaccount.Handler,
factoryHandler factory.Handler,
cloudIntegrationHandler cloudintegration.Handler,
) (apiserver.APIServer, error) {
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/apiserver/signozapiserver")
router := mux.NewRouter().UseEncodedPath()
provider := &provider{
config: config,
settings: settings,
router: router,
orgHandler: orgHandler,
userHandler: userHandler,
sessionHandler: sessionHandler,
authDomainHandler: authDomainHandler,
preferenceHandler: preferenceHandler,
globalHandler: globalHandler,
promoteHandler: promoteHandler,
flaggerHandler: flaggerHandler,
dashboardModule: dashboardModule,
dashboardHandler: dashboardHandler,
metricsExplorerHandler: metricsExplorerHandler,
gatewayHandler: gatewayHandler,
fieldsHandler: fieldsHandler,
authzHandler: authzHandler,
rawDataExportHandler: rawDataExportHandler,
zeusHandler: zeusHandler,
querierHandler: querierHandler,
serviceAccountHandler: serviceAccountHandler,
factoryHandler: factoryHandler,
cloudIntegrationHandler: cloudIntegrationHandler,
config: config,
settings: settings,
router: router,
orgHandler: orgHandler,
userHandler: userHandler,
sessionHandler: sessionHandler,
authDomainHandler: authDomainHandler,
preferenceHandler: preferenceHandler,
globalHandler: globalHandler,
promoteHandler: promoteHandler,
flaggerHandler: flaggerHandler,
dashboardModule: dashboardModule,
dashboardHandler: dashboardHandler,
metricsExplorerHandler: metricsExplorerHandler,
gatewayHandler: gatewayHandler,
fieldsHandler: fieldsHandler,
authzHandler: authzHandler,
rawDataExportHandler: rawDataExportHandler,
zeusHandler: zeusHandler,
querierHandler: querierHandler,
serviceAccountHandler: serviceAccountHandler,
factoryHandler: factoryHandler,
}
provider.authZ = middleware.NewAuthZ(settings.Logger(), orgGetter, authz)
@@ -258,10 +252,6 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
return err
}
if err := provider.addCloudIntegrationRoutes(router); err != nil {
return err
}
return nil
}

View File

@@ -111,8 +111,8 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v1/user", handler.New(provider.authZ.AdminAccess(provider.userHandler.ListUsersDeprecated), handler.OpenAPIDef{
ID: "ListUsersDeprecated",
if err := router.Handle("/api/v1/user", handler.New(provider.authZ.AdminAccess(provider.userHandler.ListUsers), handler.OpenAPIDef{
ID: "ListUsers",
Tags: []string{"users"},
Summary: "List users",
Description: "This endpoint lists all users",
@@ -128,25 +128,8 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/users", handler.New(provider.authZ.AdminAccess(provider.userHandler.ListUsers), handler.OpenAPIDef{
ID: "ListUsers",
Tags: []string{"users"},
Summary: "List users v2",
Description: "This endpoint lists all users for the organization",
Request: nil,
RequestContentType: "",
Response: make([]*types.User, 0),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/user/me", handler.New(provider.authZ.OpenAccess(provider.userHandler.GetMyUserDeprecated), handler.OpenAPIDef{
ID: "GetMyUserDeprecated",
if err := router.Handle("/api/v1/user/me", handler.New(provider.authZ.OpenAccess(provider.userHandler.GetMyUser), handler.OpenAPIDef{
ID: "GetMyUser",
Tags: []string{"users"},
Summary: "Get my user",
Description: "This endpoint returns the user I belong to",
@@ -162,42 +145,8 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/users/me", handler.New(provider.authZ.OpenAccess(provider.userHandler.GetMyUser), handler.OpenAPIDef{
ID: "GetMyUser",
Tags: []string{"users"},
Summary: "Get my user v2",
Description: "This endpoint returns the user I belong to",
Request: nil,
RequestContentType: "",
Response: new(authtypes.UserWithRoles),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: []handler.OpenAPISecurityScheme{{Name: authtypes.IdentNProviderTokenizer.StringValue()}},
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v2/users/me", handler.New(provider.authZ.OpenAccess(provider.userHandler.UpdateMyUser), handler.OpenAPIDef{
ID: "UpdateMyUserV2",
Tags: []string{"users"},
Summary: "Update my user v2",
Description: "This endpoint updates the user I belong to",
Request: new(types.UpdatableUser),
RequestContentType: "application/json",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusNoContent,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: []handler.OpenAPISecurityScheme{{Name: authtypes.IdentNProviderTokenizer.StringValue()}},
})).Methods(http.MethodPut).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/user/{id}", handler.New(provider.authZ.SelfAccess(provider.userHandler.GetUserDeprecated), handler.OpenAPIDef{
ID: "GetUserDeprecated",
if err := router.Handle("/api/v1/user/{id}", handler.New(provider.authZ.SelfAccess(provider.userHandler.GetUser), handler.OpenAPIDef{
ID: "GetUser",
Tags: []string{"users"},
Summary: "Get user",
Description: "This endpoint returns the user by id",
@@ -213,25 +162,8 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/users/{id}", handler.New(provider.authZ.AdminAccess(provider.userHandler.GetUser), handler.OpenAPIDef{
ID: "GetUser",
Tags: []string{"users"},
Summary: "Get user by user id",
Description: "This endpoint returns the user by id",
Request: nil,
RequestContentType: "",
Response: new(authtypes.UserWithRoles),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/user/{id}", handler.New(provider.authZ.SelfAccess(provider.userHandler.UpdateUserDeprecated), handler.OpenAPIDef{
ID: "UpdateUserDeprecated",
if err := router.Handle("/api/v1/user/{id}", handler.New(provider.authZ.SelfAccess(provider.userHandler.UpdateUser), handler.OpenAPIDef{
ID: "UpdateUser",
Tags: []string{"users"},
Summary: "Update user",
Description: "This endpoint updates the user by id",
@@ -247,23 +179,6 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/users/{id}", handler.New(provider.authZ.AdminAccess(provider.userHandler.UpdateUser), handler.OpenAPIDef{
ID: "UpdateUser",
Tags: []string{"users"},
Summary: "Update user v2",
Description: "This endpoint updates the user by id",
Request: new(types.UpdatableUser),
RequestContentType: "application/json",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusNoContent,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodPut).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/user/{id}", handler.New(provider.authZ.AdminAccess(provider.userHandler.DeleteUser), handler.OpenAPIDef{
ID: "DeleteUser",
Tags: []string{"users"},
@@ -349,73 +264,5 @@ func (provider *provider) addUserRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/users/{id}/roles", handler.New(provider.authZ.AdminAccess(provider.userHandler.GetRolesByUserID), handler.OpenAPIDef{
ID: "GetRolesByUserID",
Tags: []string{"users"},
Summary: "Get user roles",
Description: "This endpoint returns the user roles by user id",
Request: nil,
RequestContentType: "",
Response: make([]*authtypes.Role, 0),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v2/users/{id}/roles", handler.New(provider.authZ.AdminAccess(provider.userHandler.SetRoleByUserID), handler.OpenAPIDef{
ID: "SetRoleByUserID",
Tags: []string{"users"},
Summary: "Set user roles",
Description: "This endpoint assigns the role to the user roles by user id",
Request: new(types.PostableRole),
RequestContentType: "application/json",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v2/users/{id}/roles/{roleId}", handler.New(provider.authZ.AdminAccess(provider.userHandler.RemoveUserRoleByRoleID), handler.OpenAPIDef{
ID: "RemoveUserRoleByUserIDAndRoleID",
Tags: []string{"users"},
Summary: "Remove a role from user",
Description: "This endpoint removes a role from the user by user id and role id",
Request: nil,
RequestContentType: "",
Response: nil,
ResponseContentType: "",
SuccessStatusCode: http.StatusNoContent,
ErrorStatusCodes: []int{http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodDelete).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v2/roles/{id}/users", handler.New(provider.authZ.AdminAccess(provider.userHandler.GetUsersByRoleID), handler.OpenAPIDef{
ID: "GetUsersByRoleID",
Tags: []string{"users"},
Summary: "Get users by role id",
Description: "This endpoint returns the users having the role by role id",
Request: nil,
RequestContentType: "",
Response: make([]*types.User, 0),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusNotFound},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -53,7 +53,7 @@ type Module interface {
}
type Handler interface {
CreateAccount(http.ResponseWriter, *http.Request)
GetConnectionArtifact(http.ResponseWriter, *http.Request)
ListAccounts(http.ResponseWriter, *http.Request)
GetAccount(http.ResponseWriter, *http.Request)
UpdateAccount(http.ResponseWriter, *http.Request)

View File

@@ -1,58 +0,0 @@
package implcloudintegration
import (
"net/http"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
)
type handler struct{}
func NewHandler() cloudintegration.Handler {
return &handler{}
}
func (handler *handler) CreateAccount(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) ListAccounts(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) GetAccount(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) UpdateAccount(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) DisconnectAccount(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) ListServicesMetadata(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) GetService(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) UpdateService(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}
func (handler *handler) AgentCheckIn(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")
}

View File

@@ -160,7 +160,7 @@ func (module *module) CreateCallbackAuthNSession(ctx context.Context, authNProvi
return "", errors.WithAdditionalf(err, "root user can only authenticate via password")
}
userRoles, err := module.userGetter.GetRolesByUserID(ctx, newUser.ID)
userRoles, err := module.userGetter.GetUserRoles(ctx, newUser.ID)
if err != nil {
return "", err
}

View File

@@ -37,7 +37,7 @@ func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID)
return rootUser, userRoles, nil
}
func (module *getter) ListDeprecatedUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.DeprecatedUser, error) {
func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.DeprecatedUser, error) {
users, err := module.store.ListUsersByOrgID(ctx, orgID)
if err != nil {
return nil, err
@@ -84,30 +84,13 @@ func (module *getter) ListDeprecatedUsersByOrgID(ctx context.Context, orgID valu
return deprecatedUsers, nil
}
func (module *getter) ListUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.User, error) {
users, err := module.store.ListUsersByOrgID(ctx, orgID)
if err != nil {
return nil, err
}
// filter root users if feature flag `hide_root_users` is true
evalCtx := featuretypes.NewFlaggerEvaluationContext(orgID)
hideRootUsers := module.flagger.BooleanOrEmpty(ctx, flagger.FeatureHideRootUser, evalCtx)
if hideRootUsers {
users = slices.DeleteFunc(users, func(user *types.User) bool { return user.IsRoot })
}
return users, nil
}
func (module *getter) GetDeprecatedUserByOrgIDAndID(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*types.DeprecatedUser, error) {
user, err := module.store.GetByOrgIDAndID(ctx, orgID, id)
if err != nil {
return nil, err
}
userRoles, err := module.GetRolesByUserID(ctx, id)
userRoles, err := module.GetUserRoles(ctx, id)
if err != nil {
return nil, err
}
@@ -116,26 +99,18 @@ func (module *getter) GetDeprecatedUserByOrgIDAndID(ctx context.Context, orgID v
return nil, errors.New(errors.TypeUnexpected, authtypes.ErrCodeUserRolesNotFound, "no user roles entries found")
}
if userRoles[0].Role == nil {
return nil, errors.New(errors.TypeUnexpected, authtypes.ErrCodeRoleNotFound, "role not found for user role entry")
}
role := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
return types.NewDeprecatedUserFromUserAndRole(user, role), nil
}
func (module *getter) GetUserByOrgIDAndID(ctx context.Context, orgID valuer.UUID, userID valuer.UUID) (*types.User, error) {
return module.store.GetByOrgIDAndID(ctx, orgID, userID)
}
func (module *getter) Get(ctx context.Context, id valuer.UUID) (*types.DeprecatedUser, error) {
user, err := module.store.GetUser(ctx, id)
if err != nil {
return nil, err
}
userRoles, err := module.GetRolesByUserID(ctx, id)
userRoles, err := module.GetUserRoles(ctx, id)
if err != nil {
return nil, err
}
@@ -144,10 +119,6 @@ func (module *getter) Get(ctx context.Context, id valuer.UUID) (*types.Deprecate
return nil, errors.New(errors.TypeUnexpected, authtypes.ErrCodeUserRolesNotFound, "no user roles entries found")
}
if userRoles[0].Role == nil {
return nil, errors.New(errors.TypeUnexpected, authtypes.ErrCodeRoleNotFound, "role not found for user role entry")
}
role := authtypes.SigNozManagedRoleToExistingLegacyRole[userRoles[0].Role.Name]
return types.NewDeprecatedUserFromUserAndRole(user, role), nil
@@ -203,21 +174,11 @@ func (module *getter) GetNonDeletedUserByEmailAndOrgID(ctx context.Context, emai
}
func (module *getter) GetRolesByUserID(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error) {
func (module *getter) GetUserRoles(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error) {
userRoles, err := module.userRoleStore.GetUserRolesByUserID(ctx, userID)
if err != nil {
return nil, err
}
for _, ur := range userRoles {
if ur.Role == nil {
return nil, errors.New(errors.TypeUnexpected, authtypes.ErrCodeRoleNotFound, "role not found for user role entry")
}
}
return userRoles, nil
}
func (module *getter) GetUsersByOrgIDAndRoleID(ctx context.Context, orgID valuer.UUID, roleID valuer.UUID) ([]*types.User, error) {
return module.store.GetUsersByOrgIDAndRoleID(ctx, orgID, roleID)
}

View File

@@ -85,7 +85,7 @@ func (h *handler) CreateBulkInvite(rw http.ResponseWriter, r *http.Request) {
render.Success(rw, http.StatusCreated, nil)
}
func (h *handler) GetUserDeprecated(w http.ResponseWriter, r *http.Request) {
func (h *handler) GetUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
@@ -106,39 +106,7 @@ func (h *handler) GetUserDeprecated(w http.ResponseWriter, r *http.Request) {
render.Success(w, http.StatusOK, user)
}
func (h *handler) GetUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
userID := mux.Vars(r)["id"]
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
user, err := h.getter.GetUserByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(userID))
if err != nil {
render.Error(w, err)
return
}
userRoles, err := h.getter.GetRolesByUserID(ctx, user.ID)
if err != nil {
render.Error(w, err)
return
}
userWithRoles := &authtypes.UserWithRoles{
User: user,
UserRoles: userRoles,
}
render.Success(w, http.StatusOK, userWithRoles)
}
func (h *handler) GetMyUserDeprecated(w http.ResponseWriter, r *http.Request) {
func (h *handler) GetMyUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
@@ -157,80 +125,6 @@ func (h *handler) GetMyUserDeprecated(w http.ResponseWriter, r *http.Request) {
render.Success(w, http.StatusOK, user)
}
func (h *handler) GetMyUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
user, err := h.getter.GetUserByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID))
if err != nil {
render.Error(w, err)
return
}
userRoles, err := h.getter.GetRolesByUserID(ctx, user.ID)
if err != nil {
render.Error(w, err)
return
}
userWithRoles := &authtypes.UserWithRoles{
User: user,
UserRoles: userRoles,
}
render.Success(w, http.StatusOK, userWithRoles)
}
func (h *handler) UpdateMyUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
updatableUser := new(types.UpdatableUser)
if err := json.NewDecoder(r.Body).Decode(&updatableUser); err != nil {
render.Error(w, err)
return
}
_, err = h.setter.UpdateUser(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(claims.UserID), updatableUser)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusNoContent, nil)
}
func (h *handler) ListUsersDeprecated(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
users, err := h.getter.ListDeprecatedUsersByOrgID(ctx, valuer.MustNewUUID(claims.OrgID))
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, users)
}
func (h *handler) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
@@ -241,7 +135,7 @@ func (h *handler) ListUsers(w http.ResponseWriter, r *http.Request) {
return
}
users, err := h.getter.ListUsersByOrgID(ctx, valuer.MustNewUUID(claims.OrgID))
users, err := h.getter.ListByOrgID(ctx, valuer.MustNewUUID(claims.OrgID))
if err != nil {
render.Error(w, err)
return
@@ -250,7 +144,7 @@ func (h *handler) ListUsers(w http.ResponseWriter, r *http.Request) {
render.Success(w, http.StatusOK, users)
}
func (h *handler) UpdateUserDeprecated(w http.ResponseWriter, r *http.Request) {
func (h *handler) UpdateUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
@@ -268,7 +162,7 @@ func (h *handler) UpdateUserDeprecated(w http.ResponseWriter, r *http.Request) {
return
}
updatedUser, err := h.setter.UpdateUserDeprecated(ctx, valuer.MustNewUUID(claims.OrgID), id, &user, claims.UserID)
updatedUser, err := h.setter.UpdateUser(ctx, valuer.MustNewUUID(claims.OrgID), id, &user, claims.UserID)
if err != nil {
render.Error(w, err)
return
@@ -277,38 +171,6 @@ func (h *handler) UpdateUserDeprecated(w http.ResponseWriter, r *http.Request) {
render.Success(w, http.StatusOK, updatedUser)
}
func (h *handler) UpdateUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
userID := mux.Vars(r)["id"]
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
if userID == claims.UserID {
render.Error(w, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "users cannot call this api on self"))
return
}
updatableUser := new(types.UpdatableUser)
if err := json.NewDecoder(r.Body).Decode(&updatableUser); err != nil {
render.Error(w, err)
return
}
_, err = h.setter.UpdateUser(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(userID), updatableUser)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusNoContent, nil)
}
func (h *handler) DeleteUser(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
@@ -581,118 +443,3 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
render.Success(w, http.StatusNoContent, nil)
}
func (h *handler) GetRolesByUserID(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
userID := mux.Vars(r)["id"]
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
user, err := h.getter.GetUserByOrgIDAndID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(userID))
if err != nil {
render.Error(w, err)
return
}
userRoles, err := h.getter.GetRolesByUserID(ctx, user.ID)
if err != nil {
render.Error(w, err)
return
}
roles := make([]*authtypes.Role, len(userRoles))
for idx, userRole := range userRoles {
roles[idx] = authtypes.NewRoleFromStorableRole(userRole.Role)
}
render.Success(w, http.StatusOK, roles)
}
func (h *handler) SetRoleByUserID(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
userID := mux.Vars(r)["id"]
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
if userID == claims.UserID {
render.Error(w, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "users cannot call this api on self"))
return
}
postableRole := new(types.PostableRole)
if err := json.NewDecoder(r.Body).Decode(postableRole); err != nil {
render.Error(w, err)
return
}
if postableRole.Name == "" {
render.Error(w, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "role name is required"))
return
}
if err := h.setter.AddUserRole(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(userID), postableRole.Name); err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, nil)
}
func (h *handler) RemoveUserRoleByRoleID(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
userID := mux.Vars(r)["id"]
roleID := mux.Vars(r)["roleId"]
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
if userID == claims.UserID {
render.Error(w, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "users cannot call this api on self"))
return
}
if err := h.setter.RemoveUserRole(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(userID), valuer.MustNewUUID(roleID)); err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusNoContent, nil)
}
func (h *handler) GetUsersByRoleID(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
roleID := mux.Vars(r)["id"]
claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(w, err)
return
}
users, err := h.getter.GetUsersByOrgIDAndRoleID(ctx, valuer.MustNewUUID(claims.OrgID), valuer.MustNewUUID(roleID))
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, users)
}

View File

@@ -133,7 +133,7 @@ func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID
}
if existingUser != nil {
userRoles, err := s.getter.GetRolesByUserID(ctx, existingUser.ID)
userRoles, err := s.getter.GetUserRoles(ctx, existingUser.ID)
if err != nil {
return err
}
@@ -156,7 +156,9 @@ func (s *service) createOrPromoteRootUser(ctx context.Context, orgID valuer.UUID
existingUser.PromoteToRoot()
err = s.store.RunInTx(ctx, func(ctx context.Context) error {
if err := s.setter.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
// update users table
deprecatedUser := types.NewDeprecatedUserFromUserAndRole(existingUser, types.RoleAdmin)
if err := s.setter.UpdateAnyUser(ctx, orgID, deprecatedUser); err != nil {
return err
}
@@ -199,7 +201,8 @@ func (s *service) updateExistingRootUser(ctx context.Context, orgID valuer.UUID,
if existingRoot.Email != s.config.Email {
existingRoot.UpdateEmail(s.config.Email)
if err := s.setter.UpdateAnyUser(ctx, orgID, existingRoot); err != nil {
deprecatedUser := types.NewDeprecatedUserFromUserAndRole(existingRoot, types.RoleAdmin)
if err := s.setter.UpdateAnyUser(ctx, orgID, deprecatedUser); err != nil {
return err
}
}

View File

@@ -220,7 +220,7 @@ func (module *setter) CreateUser(ctx context.Context, user *types.User, opts ...
return nil
}
func (module *setter) UpdateUserDeprecated(ctx context.Context, orgID valuer.UUID, id string, user *types.DeprecatedUser, updatedBy string) (*types.DeprecatedUser, error) {
func (module *setter) UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.DeprecatedUser, updatedBy string) (*types.DeprecatedUser, error) {
existingUser, err := module.getter.GetDeprecatedUserByOrgIDAndID(ctx, orgID, valuer.MustNewUUID(id))
if err != nil {
return nil, err
@@ -265,7 +265,7 @@ func (module *setter) UpdateUserDeprecated(ctx context.Context, orgID valuer.UUI
existingUser.Update(user.DisplayName, user.Role)
// update the user - idempotent (this does analytics too so keeping it outside txn)
if err := module.UpdateAnyUserDeprecated(ctx, orgID, existingUser); err != nil {
if err := module.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
return nil, err
}
@@ -291,46 +291,7 @@ func (module *setter) UpdateUserDeprecated(ctx context.Context, orgID valuer.UUI
return existingUser, nil
}
func (module *setter) UpdateUser(ctx context.Context, orgID valuer.UUID, userID valuer.UUID, updatable *types.UpdatableUser) (*types.User, error) {
existingUser, err := module.getter.GetUserByOrgIDAndID(ctx, orgID, userID)
if err != nil {
return nil, err
}
if err := existingUser.ErrIfRoot(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot update root user")
}
if err := existingUser.ErrIfDeleted(); err != nil {
return nil, errors.WithAdditionalf(err, "cannot update deleted user")
}
existingUser.Update(updatable.DisplayName)
if err := module.UpdateAnyUser(ctx, orgID, existingUser); err != nil {
return nil, err
}
return existingUser, nil
}
func (module *setter) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error {
if err := module.store.UpdateUser(ctx, orgID, user); err != nil {
return err
}
if err := module.tokenizer.DeleteIdentity(ctx, user.ID); err != nil {
return err
}
// emit analytics identify/track events for the updated user
traits := types.NewTraitsFromUser(user)
module.analytics.IdentifyUser(ctx, user.OrgID.String(), user.ID.String(), traits)
module.analytics.TrackUser(ctx, user.OrgID.String(), user.ID.String(), "User Updated", traits)
return nil
}
func (module *setter) UpdateAnyUserDeprecated(ctx context.Context, orgID valuer.UUID, deprecateUser *types.DeprecatedUser) error {
func (module *setter) UpdateAnyUser(ctx context.Context, orgID valuer.UUID, deprecateUser *types.DeprecatedUser) error {
user := types.NewUserFromDeprecatedUser(deprecateUser)
if err := module.store.UpdateUser(ctx, orgID, user); err != nil {
return err
@@ -374,7 +335,7 @@ func (module *setter) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot self delete")
}
userRoles, err := module.getter.GetRolesByUserID(ctx, user.ID)
userRoles, err := module.getter.GetUserRoles(ctx, user.ID)
if err != nil {
return err
}
@@ -552,7 +513,7 @@ func (module *setter) UpdatePasswordByResetPasswordToken(ctx context.Context, to
return err
}
userRoles, err := module.getter.GetRolesByUserID(ctx, user.ID)
userRoles, err := module.getter.GetUserRoles(ctx, user.ID)
if err != nil {
return err
}
@@ -840,121 +801,16 @@ func (module *setter) activatePendingUser(ctx context.Context, user *types.User,
func (module *setter) UpdateUserRoles(ctx context.Context, orgID, userID valuer.UUID, finalRoleNames []string) error {
return module.store.RunInTx(ctx, func(ctx context.Context) error {
// delete old user_role entries
// delete old user_role entries and create new ones from SSO
if err := module.userRoleStore.DeleteUserRoles(ctx, userID); err != nil {
return err
}
// create fresh ones only if there are roles to assign
if len(finalRoleNames) > 0 {
return module.createUserRoleEntries(ctx, orgID, userID, finalRoleNames)
}
return nil
// create fresh ones
return module.createUserRoleEntries(ctx, orgID, userID, finalRoleNames)
})
}
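// A note on UpdateUserRoles above: since both DeleteUserRoles and
// createUserRoleEntries run inside store.RunInTx, an error while inserting the
// fresh entries should (assuming RunInTx rolls the transaction back when the
// closure returns an error) also undo the delete, so a partial write cannot
// leave the user with no roles.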
func (module *setter) AddUserRole(ctx context.Context, orgID, userID valuer.UUID, roleName string) error {
existingUser, err := module.getter.GetUserByOrgIDAndID(ctx, orgID, userID)
if err != nil {
return err
}
if err := existingUser.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot add role for root user")
}
if err := existingUser.ErrIfDeleted(); err != nil {
return errors.WithAdditionalf(err, "cannot add role for deleted user")
}
// validate that the role name exists
foundRoles, err := module.authz.ListByOrgIDAndNames(ctx, orgID, []string{roleName})
if err != nil {
return err
}
if len(foundRoles) != 1 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "role name not found: %s", roleName)
}
// check if user already has this role
existingUserRoles, err := module.getter.GetRolesByUserID(ctx, existingUser.ID)
if err != nil {
return err
}
for _, userRole := range existingUserRoles {
if userRole.Role != nil && userRole.Role.Name == roleName {
return nil // role already assigned; nothing to do
}
}
// grant via authz (idempotent)
if err := module.authz.Grant(
ctx,
orgID,
[]string{roleName},
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), existingUser.OrgID, nil),
); err != nil {
return err
}
// create user_role entry
userRoles := authtypes.NewUserRoles(userID, foundRoles)
if err := module.userRoleStore.CreateUserRoles(ctx, userRoles); err != nil {
return err
}
return module.tokenizer.DeleteIdentity(ctx, userID)
}
func (module *setter) RemoveUserRole(ctx context.Context, orgID, userID valuer.UUID, roleID valuer.UUID) error {
existingUser, err := module.getter.GetUserByOrgIDAndID(ctx, orgID, userID)
if err != nil {
return err
}
if err := existingUser.ErrIfRoot(); err != nil {
return errors.WithAdditionalf(err, "cannot remove role for root user")
}
if err := existingUser.ErrIfDeleted(); err != nil {
return errors.WithAdditionalf(err, "cannot remove role for deleted user")
}
// resolve role name for authz revoke
existingUserRoles, err := module.getter.GetRolesByUserID(ctx, existingUser.ID)
if err != nil {
return err
}
var roleName string
for _, ur := range existingUserRoles {
if ur.Role != nil && ur.RoleID == roleID {
roleName = ur.Role.Name
break
}
}
if roleName == "" {
return errors.Newf(errors.TypeNotFound, authtypes.ErrCodeUserRolesNotFound, "role %s not found for user %s", roleID, userID)
}
// revoke authz grant
if err := module.authz.Revoke(
ctx,
orgID,
[]string{roleName},
authtypes.MustNewSubject(authtypes.TypeableUser, existingUser.ID.StringValue(), existingUser.OrgID, nil),
); err != nil {
return err
}
if err := module.userRoleStore.DeleteUserRoleByUserIDAndRoleID(ctx, userID, roleID); err != nil {
return err
}
return module.tokenizer.DeleteIdentity(ctx, userID)
}
func roleNamesFromUserRoles(userRoles []*authtypes.UserRole) []string {
names := make([]string, 0, len(userRoles))
for _, ur := range userRoles {

View File

@@ -667,22 +667,3 @@ func (store *store) GetUsersByEmailsOrgIDAndStatuses(ctx context.Context, orgID
return users, nil
}
func (store *store) GetUsersByOrgIDAndRoleID(ctx context.Context, orgID valuer.UUID, roleID valuer.UUID) ([]*types.User, error) {
users := []*types.User{}
err := store.
sqlstore.
BunDBCtx(ctx).
NewSelect().
Model(&users).
Join(`JOIN user_role ON user_role.user_id = "users".id`).
Where(`"users".org_id = ?`, orgID).
Where("user_role.role_id = ?", roleID).
Scan(ctx)
if err != nil {
return nil, err
}
return users, nil
}

View File

@@ -65,21 +65,6 @@ func (store *userRoleStore) DeleteUserRoles(ctx context.Context, userID valuer.U
return nil
}
func (store *userRoleStore) DeleteUserRoleByUserIDAndRoleID(ctx context.Context, userID valuer.UUID, roleID valuer.UUID) error {
_, err := store.sqlstore.
BunDBCtx(ctx).
NewDelete().
Model(new(authtypes.UserRole)).
Where("user_id = ?", userID).
Where("role_id = ?", roleID).
Exec(ctx)
if err != nil {
return err
}
return nil
}
func (store *userRoleStore) GetUserRolesByUserID(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error) {
userRoles := make([]*authtypes.UserRole, 0)

View File

@@ -34,12 +34,10 @@ type Setter interface {
// Initiate forgot password flow for a user
ForgotPassword(ctx context.Context, orgID valuer.UUID, email valuer.Email, frontendBaseURL string) error
UpdateUserDeprecated(ctx context.Context, orgID valuer.UUID, id string, user *types.DeprecatedUser, updatedBy string) (*types.DeprecatedUser, error)
UpdateUser(ctx context.Context, orgID valuer.UUID, userID valuer.UUID, updatable *types.UpdatableUser) (*types.User, error)
UpdateUser(ctx context.Context, orgID valuer.UUID, id string, user *types.DeprecatedUser, updatedBy string) (*types.DeprecatedUser, error)
// UpdateAnyUser updates a user and persists the changes to the database along with the analytics and identity deletion.
UpdateAnyUserDeprecated(ctx context.Context, orgID valuer.UUID, deprecateUser *types.DeprecatedUser) error
UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.User) error
UpdateAnyUser(ctx context.Context, orgID valuer.UUID, user *types.DeprecatedUser) error
DeleteUser(ctx context.Context, orgID valuer.UUID, id string, deletedBy string) error
// invite
@@ -54,8 +52,6 @@ type Setter interface {
// Roles
UpdateUserRoles(ctx context.Context, orgID, userID valuer.UUID, finalRoleNames []string) error
AddUserRole(ctx context.Context, orgID, userID valuer.UUID, roleName string) error
RemoveUserRole(ctx context.Context, orgID, userID valuer.UUID, roleID valuer.UUID) error
statsreporter.StatsCollector
}
@@ -64,13 +60,11 @@ type Getter interface {
// Get root user by org id.
GetRootUserByOrgID(context.Context, valuer.UUID) (*types.User, []*authtypes.UserRole, error)
// Get gets the users based on the given org id
ListDeprecatedUsersByOrgID(context.Context, valuer.UUID) ([]*types.DeprecatedUser, error)
ListUsersByOrgID(ctx context.Context, orgID valuer.UUID) ([]*types.User, error)
// Get gets the users based on the given id
ListByOrgID(context.Context, valuer.UUID) ([]*types.DeprecatedUser, error)
// Get deprecated user object by orgID and id.
GetDeprecatedUserByOrgIDAndID(context.Context, valuer.UUID, valuer.UUID) (*types.DeprecatedUser, error)
GetUserByOrgIDAndID(ctx context.Context, orgID valuer.UUID, userID valuer.UUID) (*types.User, error)
// Get user by id.
Get(context.Context, valuer.UUID) (*types.DeprecatedUser, error)
@@ -91,10 +85,7 @@ type Getter interface {
GetNonDeletedUserByEmailAndOrgID(ctx context.Context, email valuer.Email, orgID valuer.UUID) (*types.User, error)
// Gets user_role with roles entries from db
GetRolesByUserID(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error)
// Gets all the user with role using role id in an org id
GetUsersByOrgIDAndRoleID(ctx context.Context, orgID valuer.UUID, roleID valuer.UUID) ([]*types.User, error)
GetUserRoles(ctx context.Context, userID valuer.UUID) ([]*authtypes.UserRole, error)
}
type Handler interface {
@@ -102,21 +93,11 @@ type Handler interface {
CreateInvite(http.ResponseWriter, *http.Request)
CreateBulkInvite(http.ResponseWriter, *http.Request)
// users
ListUsersDeprecated(http.ResponseWriter, *http.Request)
ListUsers(http.ResponseWriter, *http.Request)
UpdateUserDeprecated(http.ResponseWriter, *http.Request)
UpdateUser(http.ResponseWriter, *http.Request)
DeleteUser(http.ResponseWriter, *http.Request)
GetUserDeprecated(http.ResponseWriter, *http.Request)
GetUser(http.ResponseWriter, *http.Request)
GetMyUserDeprecated(http.ResponseWriter, *http.Request)
GetMyUser(http.ResponseWriter, *http.Request)
UpdateMyUser(http.ResponseWriter, *http.Request)
GetRolesByUserID(http.ResponseWriter, *http.Request)
SetRoleByUserID(http.ResponseWriter, *http.Request)
RemoveUserRoleByRoleID(http.ResponseWriter, *http.Request)
GetUsersByRoleID(http.ResponseWriter, *http.Request)
// Reset Password
GetResetPasswordToken(http.ResponseWriter, *http.Request)

View File

@@ -3,7 +3,6 @@ package prometheus
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
@@ -21,9 +20,6 @@ type Config struct {
//
// If not set, the prometheus default is used (currently 5m).
LookbackDelta time.Duration `mapstructure:"lookback_delta"`
// Timeout is the maximum time a query is allowed to run before being aborted.
Timeout time.Duration `mapstructure:"timeout"`
}
func NewConfigFactory() factory.ConfigFactory {
@@ -37,14 +33,10 @@ func newConfig() factory.Config {
Path: "",
MaxConcurrent: 20,
},
Timeout: 2 * time.Minute,
}
}
func (c Config) Validate() error {
if c.Timeout <= 0 {
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "prometheus::timeout must be greater than 0")
}
return nil
}

View File

@@ -0,0 +1,3 @@
package prometheus
const FingerprintAsPromLabelName = "fingerprint"

View File

@@ -2,6 +2,7 @@ package prometheus
import (
"log/slog"
"time"
"github.com/prometheus/prometheus/promql"
)
@@ -20,7 +21,7 @@ func NewEngine(logger *slog.Logger, cfg Config) *Engine {
Logger: logger,
Reg: nil,
MaxSamples: 50_000_000,
Timeout: cfg.Timeout,
Timeout: 2 * time.Minute,
ActiveQueryTracker: activeQueryTracker,
LookbackDelta: cfg.LookbackDelta,
})

View File

@@ -6,8 +6,6 @@ import (
"github.com/prometheus/prometheus/promql"
)
const FingerprintAsPromLabelName string = "fingerprint"
func RemoveExtraLabels(res *promql.Result, labelsToRemove ...string) error {
if len(labelsToRemove) == 0 || res == nil {
return nil

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/base64"
"fmt"
"log/slog"
"strconv"
"strings"
"time"
@@ -12,7 +11,6 @@ import (
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/telemetrytraces"
"github.com/SigNoz/signoz/pkg/types/ctxtypes"
"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
@@ -20,7 +18,6 @@ import (
)
type builderQuery[T any] struct {
logger *slog.Logger
telemetryStore telemetrystore.TelemetryStore
stmtBuilder qbtypes.StatementBuilder[T]
spec qbtypes.QueryBuilderQuery[T]
@@ -34,7 +31,6 @@ type builderQuery[T any] struct {
var _ qbtypes.Query = (*builderQuery[any])(nil)
func newBuilderQuery[T any](
logger *slog.Logger,
telemetryStore telemetrystore.TelemetryStore,
stmtBuilder qbtypes.StatementBuilder[T],
spec qbtypes.QueryBuilderQuery[T],
@@ -43,7 +39,6 @@ func newBuilderQuery[T any](
variables map[string]qbtypes.VariableItem,
) *builderQuery[T] {
return &builderQuery[T]{
logger: logger,
telemetryStore: telemetryStore,
stmtBuilder: stmtBuilder,
spec: spec,
@@ -310,45 +305,6 @@ func (q *builderQuery[T]) executeWindowList(ctx context.Context) (*qbtypes.Resul
totalBytes := uint64(0)
start := time.Now()
// Check if filter contains trace_id(s) and optimize time range if needed
if q.spec.Signal == telemetrytypes.SignalTraces &&
q.spec.Filter != nil && q.spec.Filter.Expression != "" {
traceIDs, found := telemetrytraces.ExtractTraceIDsFromFilter(q.spec.Filter.Expression)
if found && len(traceIDs) > 0 {
finder := telemetrytraces.NewTraceTimeRangeFinder(q.telemetryStore)
traceStart, traceEnd, ok := finder.GetTraceTimeRangeMulti(ctx, traceIDs)
traceStartMS := uint64(traceStart) / 1_000_000
traceEndMS := uint64(traceEnd) / 1_000_000
if !ok {
q.logger.DebugContext(ctx, "failed to get trace time range", slog.Any("trace_ids", traceIDs))
} else if traceStartMS > 0 && traceEndMS > 0 {
// no overlap — nothing to return
if uint64(traceStartMS) > toMS || uint64(traceEndMS) < fromMS {
return &qbtypes.Result{
Type: qbtypes.RequestTypeRaw,
Value: &qbtypes.RawData{
QueryName: q.spec.Name,
},
Stats: qbtypes.ExecStats{
DurationMS: uint64(time.Since(start).Milliseconds()),
},
}, nil
}
// clamp window to trace time range before bucketing
if uint64(traceStartMS) > fromMS {
fromMS = uint64(traceStartMS)
}
if uint64(traceEndMS) < toMS {
toMS = uint64(traceEndMS)
}
q.logger.DebugContext(ctx, "optimized time range for traces", slog.Any("trace_ids", traceIDs), slog.Uint64("start", fromMS), slog.Uint64("end", toMS))
}
}
}
// Get buckets and reverse them for ascending order
buckets := makeBuckets(fromMS, toMS)
if isAsc {

View File

@@ -68,6 +68,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
key string // deterministic join of label values
}
seriesMap := map[sKey]*qbtypes.TimeSeries{}
var keyOrder []sKey // preserves ClickHouse row-arrival order
stepMs := uint64(step.Duration.Milliseconds())
@@ -218,6 +219,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
if !ok {
series = &qbtypes.TimeSeries{Labels: lblObjs}
seriesMap[key] = series
keyOrder = append(keyOrder, key)
}
series.Values = append(series.Values, &qbtypes.TimeSeriesValue{
Timestamp: ts,
@@ -249,8 +251,8 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
Alias: "__result_" + strconv.Itoa(i),
}
}
for k, s := range seriesMap {
buckets[k.agg].Series = append(buckets[k.agg].Series, s)
for _, k := range keyOrder {
buckets[k.agg].Series = append(buckets[k.agg].Series, seriesMap[k])
}
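// Ranging over seriesMap directly (as before) emits series in Go's randomized
// map-iteration order; iterating keyOrder instead preserves the first-seen,
// ClickHouse row-arrival order recorded when each key was added above.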
var nonEmpty []*qbtypes.AggregationBucket

View File

@@ -188,22 +188,6 @@ func postProcessMetricQuery(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
req *qbtypes.QueryRangeRequest,
) *qbtypes.Result {
config := query.Aggregations[0]
spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
for idx := range query.Order {
if query.Order[idx].Key.Name == spaceAggOrderBy ||
query.Order[idx].Key.Name == timeAggOrderBy ||
query.Order[idx].Key.Name == timeSpaceAggOrderBy {
query.Order[idx].Key.Name = qbtypes.DefaultOrderByKey
}
}
result = q.applySeriesLimit(result, query.Limit, query.Order)
if len(query.Functions) > 0 {
step := query.StepInterval.Duration.Milliseconds()
functions := q.prepareFillZeroArgsWithStep(query.Functions, req, step)

View File

@@ -36,28 +36,6 @@ var unquotedDottedNamePattern = regexp.MustCompile(`(?:^|[{,(\s])([a-zA-Z_][a-zA
// This is a common mistake when migrating to UTF-8 syntax.
var quotedMetricOutsideBracesPattern = regexp.MustCompile(`"([^"]+)"\s*\{`)
// tryEnhancePromQLExecError attempts to convert a PromQL execution error into
// a properly typed error. Returns nil if the error is not a recognized execution error.
func tryEnhancePromQLExecError(execErr error) error {
var eqc promql.ErrQueryCanceled
var eqt promql.ErrQueryTimeout
var es promql.ErrStorage
switch {
case errors.As(execErr, &eqc):
return errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled").WithAdditional(eqc.Error())
case errors.As(execErr, &eqt):
return errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "query timed out").WithAdditional(eqt.Error())
case errors.Is(execErr, context.DeadlineExceeded):
return errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "query timed out")
case errors.Is(execErr, context.Canceled):
return errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled")
case errors.As(execErr, &es):
return errors.Newf(errors.TypeInternal, errors.CodeInternal, "query execution error: %v", execErr)
default:
return nil
}
}
// enhancePromQLError adds helpful context to PromQL parse errors,
// particularly for UTF-8 syntax migration issues where metric and label
// names containing dots need to be quoted.
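// For example, given the two patterns above, selectors along these lines would
// be flagged (a sketch, not the exact hint text emitted by this function):
//
//	http.server.duration{service.name="cart"}    // unquoted dotted names
//	"http.server.duration"{service.name="cart"}  // quoted metric outside the braces
//
// whereas the UTF-8 form keeps the quoted names inside the braces:
//
//	{"http.server.duration", "service.name"="cart"}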
@@ -235,20 +213,27 @@ func (q *promqlQuery) Execute(ctx context.Context) (*qbv5.Result, error) {
time.Unix(0, end),
q.query.Step.Duration,
)
if err != nil {
// NewRangeQuery can fail with execution errors (e.g. context deadline exceeded)
// during the query queue/scheduling stage, not just parse errors.
if err := tryEnhancePromQLExecError(err); err != nil {
return nil, err
}
if err != nil {
return nil, enhancePromQLError(query, err)
}
res := qry.Exec(ctx)
if res.Err != nil {
if err := tryEnhancePromQLExecError(res.Err); err != nil {
return nil, err
var eqc promql.ErrQueryCanceled
var eqt promql.ErrQueryTimeout
var es promql.ErrStorage
switch {
case errors.As(res.Err, &eqc):
return nil, errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled")
case errors.As(res.Err, &eqt):
return nil, errors.Newf(errors.TypeTimeout, errors.CodeTimeout, "query timeout")
case errors.As(res.Err, &es):
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "query execution error: %v", res.Err)
}
if errors.Is(res.Err, context.Canceled) {
return nil, errors.Newf(errors.TypeCanceled, errors.CodeCanceled, "query canceled")
}
return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "query execution error: %v", res.Err)

View File

@@ -353,13 +353,13 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
case qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]:
spec.ShiftBy = extractShiftFromBuilderQuery(spec)
timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
bq := newBuilderQuery(q.logger, q.telemetryStore, q.traceStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
bq := newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
queries[spec.Name] = bq
steps[spec.Name] = spec.StepInterval
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]:
spec.ShiftBy = extractShiftFromBuilderQuery(spec)
timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: req.Start, To: req.End}, req.RequestType)
bq := newBuilderQuery(q.logger, q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
queries[spec.Name] = bq
steps[spec.Name] = spec.StepInterval
case qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]:
@@ -397,9 +397,9 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
if spec.Source == telemetrytypes.SourceMeter {
event.Source = telemetrytypes.SourceMeter.StringValue()
bq = newBuilderQuery(q.logger, q.telemetryStore, q.meterStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
bq = newBuilderQuery(q.telemetryStore, q.meterStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
} else {
bq = newBuilderQuery(q.logger, q.telemetryStore, q.metricStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
bq = newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, spec, timeRange, req.RequestType, tmplVars)
}
queries[spec.Name] = bq
@@ -509,7 +509,7 @@ func (q *querier) QueryRawStream(ctx context.Context, orgID valuer.UUID, req *qb
case <-tick:
// timestamp end is not specified here
timeRange := adjustTimeRangeForShift(spec, qbtypes.TimeRange{From: tsStart}, req.RequestType)
bq := newBuilderQuery(q.logger, q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType, map[string]qbtypes.VariableItem{
bq := newBuilderQuery(q.telemetryStore, q.logStmtBuilder, spec, timeRange, req.RequestType, map[string]qbtypes.VariableItem{
"id": {
Value: updatedLogID,
},
@@ -801,22 +801,22 @@ func (q *querier) createRangedQuery(originalQuery qbtypes.Query, timeRange qbtyp
specCopy := qt.spec.Copy()
specCopy.ShiftBy = extractShiftFromBuilderQuery(specCopy)
adjustedTimeRange := adjustTimeRangeForShift(specCopy, timeRange, qt.kind)
return newBuilderQuery(q.logger, q.telemetryStore, q.traceStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
return newBuilderQuery(q.telemetryStore, q.traceStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
case *builderQuery[qbtypes.LogAggregation]:
specCopy := qt.spec.Copy()
specCopy.ShiftBy = extractShiftFromBuilderQuery(specCopy)
adjustedTimeRange := adjustTimeRangeForShift(specCopy, timeRange, qt.kind)
return newBuilderQuery(q.logger, q.telemetryStore, q.logStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
return newBuilderQuery(q.telemetryStore, q.logStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
case *builderQuery[qbtypes.MetricAggregation]:
specCopy := qt.spec.Copy()
specCopy.ShiftBy = extractShiftFromBuilderQuery(specCopy)
adjustedTimeRange := adjustTimeRangeForShift(specCopy, timeRange, qt.kind)
if qt.spec.Source == telemetrytypes.SourceMeter {
return newBuilderQuery(q.logger, q.telemetryStore, q.meterStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
return newBuilderQuery(q.telemetryStore, q.meterStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
}
return newBuilderQuery(q.logger, q.telemetryStore, q.metricStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
return newBuilderQuery(q.telemetryStore, q.metricStmtBuilder, specCopy, adjustedTimeRange, qt.kind, qt.variables)
case *traceOperatorQuery:
specCopy := qt.spec.Copy()
return &traceOperatorQuery{

View File

@@ -1407,7 +1407,7 @@ func Test_querier_Traces_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1633,7 +1633,7 @@ func Test_querier_Traces_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1934,7 +1934,7 @@ func Test_querier_Logs_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -2162,7 +2162,7 @@ func Test_querier_Logs_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,

View File

@@ -1459,7 +1459,7 @@ func Test_querier_Traces_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1685,7 +1685,7 @@ func Test_querier_Traces_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -1985,7 +1985,7 @@ func Test_querier_Logs_runWindowBasedListQueryDesc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,
@@ -2213,7 +2213,7 @@ func Test_querier_Logs_runWindowBasedListQueryAsc(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore),
"",
time.Duration(time.Second),
nil,

View File

@@ -698,7 +698,7 @@ func TestBaseRule_FilterNewSeries(t *testing.T) {
slog.Default(),
nil,
telemetryStore,
prometheustest.New(context.Background(), settings, prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore),
prometheustest.New(context.Background(), settings, prometheus.Config{}, telemetryStore),
"",
time.Second,
nil,

View File

@@ -253,7 +253,7 @@ func TestManager_TestNotification_SendUnmatched_PromRule(t *testing.T) {
WillReturnRows(samplesRows)
// Create Prometheus provider for this test
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, store)
promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, store)
},
ManagerOptionsHook: func(opts *ManagerOptions) {
// Set Prometheus provider for PromQL queries

View File

@@ -99,7 +99,7 @@ func NewTestManager(t *testing.T, testOpts *TestManagerOptions) *Manager {
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
providerSettings := instrumentationtest.New().ToProviderSettings()
prometheus := prometheustest.New(context.Background(), providerSettings, prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
prometheus := prometheustest.New(context.Background(), providerSettings, prometheus.Config{}, telemetryStore)
reader := clickhouseReader.NewReader(
instrumentationtest.New().Logger(),
nil,

View File

@@ -940,7 +940,7 @@ func TestPromRuleUnitCombinations(t *testing.T) {
).
WillReturnRows(samplesRows)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
postableRule.RuleCondition.CompareOp = ruletypes.CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
@@ -1061,7 +1061,7 @@ func _Enable_this_after_9146_issue_fix_is_merged_TestPromRuleNoData(t *testing.T
WithArgs("test_metric", "__name__", "test_metric").
WillReturnRows(fingerprintRows)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
var target float64 = 0
postableRule.RuleCondition.Thresholds = &ruletypes.RuleThresholdData{
@@ -1281,7 +1281,7 @@ func TestMultipleThresholdPromRule(t *testing.T) {
).
WillReturnRows(samplesRows)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
promProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
postableRule.RuleCondition.CompareOp = ruletypes.CompareOp(c.compareOp)
postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
@@ -1441,7 +1441,7 @@ func TestPromRule_NoData(t *testing.T) {
promProvider := prometheustest.New(
context.Background(),
instrumentationtest.New().ToProviderSettings(),
prometheus.Config{Timeout: 2 * time.Minute},
prometheus.Config{},
telemetryStore,
)
defer func() {
@@ -1590,7 +1590,7 @@ func TestPromRule_NoData_AbsentFor(t *testing.T) {
promProvider := prometheustest.New(
context.Background(),
instrumentationtest.New().ToProviderSettings(),
prometheus.Config{Timeout: 2 * time.Minute},
prometheus.Config{},
telemetryStore,
)
defer func() {
@@ -1748,7 +1748,7 @@ func TestPromRuleEval_RequireMinPoints(t *testing.T) {
promProvider := prometheustest.New(
context.Background(),
instrumentationtest.New().ToProviderSettings(),
prometheus.Config{Timeout: 2 * time.Minute, LookbackDelta: lookBackDelta},
prometheus.Config{LookbackDelta: lookBackDelta},
telemetryStore,
)
defer func() {

View File

@@ -779,7 +779,7 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
},
)
require.NoError(t, err)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
@@ -894,7 +894,7 @@ func TestThresholdRuleNoData(t *testing.T) {
)
assert.NoError(t, err)
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, readerCache, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@@ -1014,7 +1014,7 @@ func TestThresholdRuleTracesLink(t *testing.T) {
}
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@@ -1151,7 +1151,7 @@ func TestThresholdRuleLogsLink(t *testing.T) {
}
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Duration(time.Second), nil, nil, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
@@ -1418,7 +1418,7 @@ func TestMultipleThresholdRule(t *testing.T) {
},
)
require.NoError(t, err)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore), "", time.Second, nil, readerCache, options)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore), "", time.Second, nil, readerCache, options)
rule, err := NewThresholdRule("69", valuer.GenerateUUID(), &postableRule, reader, nil, logger)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
@@ -2220,7 +2220,7 @@ func TestThresholdEval_RequireMinPoints(t *testing.T) {
)
require.NoError(t, err)
prometheusProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{Timeout: 2 * time.Minute}, telemetryStore)
prometheusProvider := prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, telemetryStore)
reader := clickhouseReader.NewReader(slog.Default(), nil, telemetryStore, prometheusProvider, "", time.Second, nil, readerCache, options)
rule, err := NewThresholdRule("some-id", valuer.GenerateUUID(), &postableRule, reader, nil, logger)

View File

@@ -12,8 +12,6 @@ import (
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/modules/apdex"
"github.com/SigNoz/signoz/pkg/modules/apdex/implapdex"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
@@ -40,25 +38,24 @@ import (
)
type Handlers struct {
SavedView savedview.Handler
Apdex apdex.Handler
Dashboard dashboard.Handler
QuickFilter quickfilter.Handler
TraceFunnel tracefunnel.Handler
RawDataExport rawdataexport.Handler
SpanPercentile spanpercentile.Handler
Services services.Handler
MetricsExplorer metricsexplorer.Handler
Global global.Handler
FlaggerHandler flagger.Handler
GatewayHandler gateway.Handler
Fields fields.Handler
AuthzHandler authz.Handler
ZeusHandler zeus.Handler
QuerierHandler querier.Handler
ServiceAccountHandler serviceaccount.Handler
RegistryHandler factory.Handler
CloudIntegrationHandler cloudintegration.Handler
SavedView savedview.Handler
Apdex apdex.Handler
Dashboard dashboard.Handler
QuickFilter quickfilter.Handler
TraceFunnel tracefunnel.Handler
RawDataExport rawdataexport.Handler
SpanPercentile spanpercentile.Handler
Services services.Handler
MetricsExplorer metricsexplorer.Handler
Global global.Handler
FlaggerHandler flagger.Handler
GatewayHandler gateway.Handler
Fields fields.Handler
AuthzHandler authz.Handler
ZeusHandler zeus.Handler
QuerierHandler querier.Handler
ServiceAccountHandler serviceaccount.Handler
RegistryHandler factory.Handler
}
func NewHandlers(
@@ -76,24 +73,23 @@ func NewHandlers(
registryHandler factory.Handler,
) Handlers {
return Handlers{
SavedView: implsavedview.NewHandler(modules.SavedView),
Apdex: implapdex.NewHandler(modules.Apdex),
Dashboard: impldashboard.NewHandler(modules.Dashboard, providerSettings),
QuickFilter: implquickfilter.NewHandler(modules.QuickFilter),
TraceFunnel: impltracefunnel.NewHandler(modules.TraceFunnel),
RawDataExport: implrawdataexport.NewHandler(modules.RawDataExport),
Services: implservices.NewHandler(modules.Services),
MetricsExplorer: implmetricsexplorer.NewHandler(modules.MetricsExplorer),
SpanPercentile: implspanpercentile.NewHandler(modules.SpanPercentile),
Global: signozglobal.NewHandler(global),
FlaggerHandler: flagger.NewHandler(flaggerService),
GatewayHandler: gateway.NewHandler(gatewayService),
Fields: implfields.NewHandler(providerSettings, telemetryMetadataStore),
AuthzHandler: signozauthzapi.NewHandler(authz),
ZeusHandler: zeus.NewHandler(zeusService, licensing),
QuerierHandler: querierHandler,
ServiceAccountHandler: implserviceaccount.NewHandler(modules.ServiceAccount),
RegistryHandler: registryHandler,
CloudIntegrationHandler: implcloudintegration.NewHandler(),
SavedView: implsavedview.NewHandler(modules.SavedView),
Apdex: implapdex.NewHandler(modules.Apdex),
Dashboard: impldashboard.NewHandler(modules.Dashboard, providerSettings),
QuickFilter: implquickfilter.NewHandler(modules.QuickFilter),
TraceFunnel: impltracefunnel.NewHandler(modules.TraceFunnel),
RawDataExport: implrawdataexport.NewHandler(modules.RawDataExport),
Services: implservices.NewHandler(modules.Services),
MetricsExplorer: implmetricsexplorer.NewHandler(modules.MetricsExplorer),
SpanPercentile: implspanpercentile.NewHandler(modules.SpanPercentile),
Global: signozglobal.NewHandler(global),
FlaggerHandler: flagger.NewHandler(flaggerService),
GatewayHandler: gateway.NewHandler(gatewayService),
Fields: implfields.NewHandler(providerSettings, telemetryMetadataStore),
AuthzHandler: signozauthzapi.NewHandler(authz),
ZeusHandler: zeus.NewHandler(zeusService, licensing),
QuerierHandler: querierHandler,
ServiceAccountHandler: implserviceaccount.NewHandler(modules.ServiceAccount),
RegistryHandler: registryHandler,
}
}

View File

@@ -17,7 +17,6 @@ import (
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/modules/authdomain"
"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
@@ -66,7 +65,6 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
struct{ querier.Handler }{},
struct{ serviceaccount.Handler }{},
struct{ factory.Handler }{},
struct{ cloudintegration.Handler }{},
).New(ctx, instrumentation.ToProviderSettings(), apiserver.Config{})
if err != nil {
return nil, err

View File

@@ -279,7 +279,6 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
handlers.QuerierHandler,
handlers.ServiceAccountHandler,
handlers.RegistryHandler,
handlers.CloudIntegrationHandler,
),
)
}

View File

@@ -165,7 +165,7 @@ func (provider *provider) Report(ctx context.Context) error {
continue
}
users, err := provider.userGetter.ListUsersByOrgID(ctx, org.ID)
users, err := provider.userGetter.ListByOrgID(ctx, org.ID)
if err != nil {
provider.settings.Logger().WarnContext(ctx, "failed to list users", errors.Attr(err), slog.Any("org_id", org.ID))
continue
@@ -178,7 +178,7 @@ func (provider *provider) Report(ctx context.Context) error {
}
for _, user := range users {
traits := types.NewTraitsFromUser(user)
traits := types.NewTraitsFromDeprecatedUser(user)
if maxLastObservedAt, ok := maxLastObservedAtPerUserID[user.ID]; ok {
traits["auth_token.last_observed_at.max.time"] = maxLastObservedAt.UTC()
traits["auth_token.last_observed_at.max.time_unix"] = maxLastObservedAt.Unix()

View File

@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
},
expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
},
expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
},
expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
},
expectedErr: nil,

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"log/slog"
"strings"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
@@ -539,6 +540,13 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
return fmt.Sprintf("__spatial_aggregation_cte AS (%s)", q), args
}
func isMetricAggOrderByKey(key string, config qbtypes.MetricAggregation) bool {
spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
return key == spaceAggOrderBy || key == timeAggOrderBy || key == timeSpaceAggOrderBy
}
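// As a concrete example of the formats built by isMetricAggOrderByKey above:
// with MetricName "signoz_calls_total", TimeAggregation "avg" and
// SpaceAggregation "sum", the keys "sum(signoz_calls_total)",
// "avg(signoz_calls_total)" and "sum(avg(signoz_calls_total))" all match.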
func (b *MetricQueryStatementBuilder) BuildFinalSelect(
cteFragments []string,
cteArgs [][]any,
@@ -546,73 +554,159 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
) (*qbtypes.Statement, error) {
metricType := query.Aggregations[0].Type
spaceAgg := query.Aggregations[0].SpaceAggregation
finalCTE := "__spatial_aggregation_cte"
if metricType == metrictypes.HistogramType {
histogramCTE, histogramCTEArgs, err := b.buildHistogramCTE(query)
if err != nil {
return nil, err
}
cteFragments = append(cteFragments, histogramCTE)
cteArgs = append(cteArgs, histogramCTEArgs)
finalCTE = "__histogram_cte"
}
groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
hasGroupBy := len(groupByKeys) > 0
if hasGroupBy {
cteWithAvgColumn := b.buildCTEWithAvgColumn(query, finalCTE)
cteFragments = append(cteFragments, cteWithAvgColumn)
cteWithGroupRankColumn := b.buildCTEWithGroupRank(query)
cteFragments = append(cteFragments, cteWithGroupRankColumn)
finalCTE = "__with_group_rank_cte"
}
combined := querybuilder.CombineCTEs(cteFragments)
var args []any
for _, a := range cteArgs {
args = append(args, a...)
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("ts")
if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
sb.SelectMore("le")
}
sb.SelectMore(groupByKeys...)
sb.SelectMore("value")
sb.From(finalCTE)
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Where(rewrittenExpr)
}
if metricType == metrictypes.HistogramType && spaceAgg.IsPercentile() {
quantile := query.Aggregations[0].SpaceAggregation.Percentile()
sb.Select("ts")
for _, g := range query.GroupBy {
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
sb.SelectMore(fmt.Sprintf(
"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
quantile,
))
sb.From("__spatial_aggregation_cte")
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Having(rewrittenExpr)
}
} else if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
sb.Select("ts")
for _, g := range query.GroupBy {
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
if err != nil {
return nil, err
}
sb.SelectMore(aggQuery)
sb.From("__spatial_aggregation_cte")
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Having(rewrittenExpr)
}
} else {
// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
sb.Select("*")
sb.From("__spatial_aggregation_cte")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Where(rewrittenExpr)
if hasGroupBy {
sb.OrderBy("group_rank")
if query.Limit > 0 {
sb.Where(fmt.Sprintf("group_rank <= %d", query.Limit))
}
}
sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.OrderBy("ts")
if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
sb.OrderBy("toFloat64(le)")
}
sb.OrderBy("ts ASC")
q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
}
func (b *MetricQueryStatementBuilder) buildHistogramCTE(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
) (string, []any, error) {
spaceAgg := query.Aggregations[0].SpaceAggregation
histogramCTEQueryBuilder := sqlbuilder.NewSelectBuilder()
if spaceAgg.IsPercentile() {
histogramCTEQueryBuilder.Select("ts")
for _, g := range query.GroupBy {
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
quantile := spaceAgg.Percentile()
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf(
"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
quantile,
))
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
histogramCTEQueryBuilder.GroupBy("ts")
} else if spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
histogramCTEQueryBuilder.Select("ts")
for _, g := range query.GroupBy {
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
if err != nil {
return "", nil, err
}
histogramCTEQueryBuilder.SelectMore(aggQuery)
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
histogramCTEQueryBuilder.GroupBy("ts")
} else {
// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
histogramCTEQueryBuilder.Select("*")
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
}
histogramQueryCTE, histogramQueryCTEArgs := histogramCTEQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
histogramCTE := fmt.Sprintf("__histogram_cte AS (%s)", histogramQueryCTE)
return histogramCTE, histogramQueryCTEArgs, nil
}
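// Illustrative shape of the CTE this produces for a p95 space aggregation grouped
// by `service.name` (it mirrors the expectations in the statement builder tests):
//
//	__histogram_cte AS (
//		SELECT ts, `service.name`,
//			histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value
//		FROM __spatial_aggregation_cte
//		GROUP BY `service.name`, ts
//	)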
/*
buildCTEWithAvgColumn receives a CTE (__spatial_aggregation_cte or __histogram_cte) that has the columns ts and value, plus one column per group-by key.
It creates a CTE (__with_avg_cte) that adds an avg_val column holding the average value of the group each row belongs to.
*/
func (b *MetricQueryStatementBuilder) buildCTEWithAvgColumn(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
latestCTE string,
) string {
withAvgCTEBuilder := sqlbuilder.NewSelectBuilder()
withAvgCTEBuilder.Select("*")
groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
withAvgCTEBuilder.SelectMore(fmt.Sprintf("avgIf(value, isNaN(value) = 0) OVER (PARTITION BY %s) AS avg_val", strings.Join(groupByKeys, ",")))
withAvgCTEBuilder.From(latestCTE)
withAvgCTEQuery, _ := withAvgCTEBuilder.BuildWithFlavor(sqlbuilder.ClickHouse) // no args so second return param is ignored
withAvgCTE := fmt.Sprintf("__with_avg_cte AS (%s)", withAvgCTEQuery)
return withAvgCTE
}
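// Illustrative shape of the CTE this produces, assuming a single group-by key
// `service.name` and __spatial_aggregation_cte as the latest CTE (sketch only):
//
//	__with_avg_cte AS (
//		SELECT *, avgIf(value, isNaN(value) = 0) OVER (PARTITION BY `service.name`) AS avg_val
//		FROM __spatial_aggregation_cte
//	)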
/*
buildCTEWithGroupRank receives __with_avg_cte, which has the columns ts and value, one column per group-by key, and avg_val (the average value of the group each row belongs to).
It creates a CTE (__with_group_rank_cte) that adds a group_rank column ranking each group by the order-by keys (or by avg_val if none are given).
*/
func (b *MetricQueryStatementBuilder) buildCTEWithGroupRank(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
) string {
withGroupByCTEBuilder := sqlbuilder.NewSelectBuilder()
withGroupByCTEBuilder.Select("*")
windowOrder := []string{}
orderedKeys := map[string]struct{}{} // used later to append the remaining group-by keys as tie breakers at the end
if len(query.Order) > 0 {
for _, o := range query.Order {
key := o.Key.Name
if isMetricAggOrderByKey(key, query.Aggregations[0]) {
windowOrder = append(windowOrder, fmt.Sprintf("avg_val %s", o.Direction.StringValue()))
} else {
windowOrder = append(windowOrder, fmt.Sprintf("`%s` %s", key, o.Direction.StringValue()))
orderedKeys[fmt.Sprintf("`%s`", key)] = struct{}{}
}
}
} else {
windowOrder = append(windowOrder, "avg_val DESC")
}
groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
for _, gk := range groupByKeys { // keys that haven't been added via order by keys will be added at the end as tie breakers
if _, ok := orderedKeys[gk]; !ok {
windowOrder = append(windowOrder, fmt.Sprintf("%s ASC", gk))
}
}
withGroupByCTEBuilder.SelectMore(fmt.Sprintf("dense_rank() OVER (ORDER BY %s) AS group_rank", strings.Join(windowOrder, ",")))
withGroupByCTEBuilder.From("__with_avg_cte")
withGroupRankCTEQuery, _ := withGroupByCTEBuilder.BuildWithFlavor(sqlbuilder.ClickHouse) // no args so second return param is ignored
withGroupRankCTE := fmt.Sprintf("__with_group_rank_cte AS (%s)", withGroupRankCTEQuery)
return withGroupRankCTE
}
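// Illustrative shape of the CTE this produces when no explicit order-by is given,
// again assuming a single group-by key `service.name` (sketch only):
//
//	__with_group_rank_cte AS (
//		SELECT *, dense_rank() OVER (ORDER BY avg_val DESC, `service.name` ASC) AS group_rank
//		FROM __with_avg_cte
//	)
//
// BuildFinalSelect can then keep only the top groups with a "group_rank <= <limit>"
// predicate and order the final rows by group_rank.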

View File

@@ -15,16 +15,17 @@ import (
)
func TestStatementBuilder(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
expected qbtypes.Statement
expectedErr error
}{
type baseQuery struct {
name string
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
orderKey string
args []any
cte string
}
bases := []baseQuery{
{
name: "test_cumulative_rate_sum",
requestType: qbtypes.RequestTypeTimeSeries,
name: "cumulative_rate_sum",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -40,24 +41,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
},
{
name: "test_cumulative_rate_sum_with_mat_column",
requestType: qbtypes.RequestTypeTimeSeries,
name: "cumulative_rate_sum_with_mat_column",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -73,24 +66,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "materialized.key.name REGEXP 'cartservice' OR service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
},
{
name: "test_delta_rate_sum",
requestType: qbtypes.RequestTypeTimeSeries,
name: "delta_rate_sum",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -106,24 +91,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`)",
},
{
name: "test_histogram_percentile1",
requestType: qbtypes.RequestTypeTimeSeries,
name: "histogram_percentile1",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -139,24 +116,38 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
},
{
name: "test_gauge_avg_sum",
requestType: qbtypes.RequestTypeTimeSeries,
name: "histogram_percentile2",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "http_server_duration_bucket",
Type: metrictypes.HistogramType,
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
},
},
GroupBy: []qbtypes.GroupByKey{
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
orderKey: "service.name",
args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`), __histogram_cte AS (SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts)",
},
{
name: "gauge_avg_sum",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -172,53 +163,83 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "host.name = 'big-data-node-1'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "host.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "host.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,
},
{
name: "test_histogram_percentile2",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "http_server_duration_bucket",
Type: metrictypes.HistogramType,
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
},
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "host.name",
args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`)",
},
}
type variant struct {
name string
limit int
hasOrder bool
}
variants := []variant{
{"with_limits", 10, false},
{"without_limits", 0, false},
{"with_order_by", 0, true},
{"with_order_by_and_limits", 10, true},
}
sumMetricsFinalSelects := map[string]string{
"with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
"with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
}
histogramMetricsFinalSelects := map[string]string{
"with_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"without_limits": " SELECT * FROM __histogram_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"with_order_by": " SELECT * FROM __histogram_cte ORDER BY `service.name` asc, ts ASC",
"with_order_by_and_limits": " SELECT * FROM __histogram_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __histogram_cte GROUP BY `service.name` ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
}
// expectedFinalSelects maps "base/variant" to the final SELECT portion after the CTE.
// The full expected query is: base.cte + expectedFinalSelects[name]
expectedFinalSelects := map[string]string{
// cumulative_rate_sum
"cumulative_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
"cumulative_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
"cumulative_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
"cumulative_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
// cumulative_rate_sum_with_mat_column
"cumulative_rate_sum_with_mat_column/with_limits": sumMetricsFinalSelects["with_limits"],
"cumulative_rate_sum_with_mat_column/without_limits": sumMetricsFinalSelects["without_limits"],
"cumulative_rate_sum_with_mat_column/with_order_by": sumMetricsFinalSelects["with_order_by"],
"cumulative_rate_sum_with_mat_column/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
// delta_rate_sum
"delta_rate_sum/with_limits": sumMetricsFinalSelects["with_limits"],
"delta_rate_sum/without_limits": sumMetricsFinalSelects["without_limits"],
"delta_rate_sum/with_order_by": sumMetricsFinalSelects["with_order_by"],
"delta_rate_sum/with_order_by_and_limits": sumMetricsFinalSelects["with_order_by_and_limits"],
// histogram_percentile1
"histogram_percentile1/with_limits": histogramMetricsFinalSelects["with_limits"],
"histogram_percentile1/without_limits": histogramMetricsFinalSelects["without_limits"],
"histogram_percentile1/with_order_by": histogramMetricsFinalSelects["with_order_by"],
"histogram_percentile1/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],
// histogram_percentile2
"histogram_percentile2/with_limits": histogramMetricsFinalSelects["with_limits"],
"histogram_percentile2/without_limits": histogramMetricsFinalSelects["without_limits"],
"histogram_percentile2/with_order_by": histogramMetricsFinalSelects["with_order_by"],
"histogram_percentile2/with_order_by_and_limits": histogramMetricsFinalSelects["with_order_by_and_limits"],
// gauge_avg_sum
"gauge_avg_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) DESC LIMIT 10) ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
"gauge_avg_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
"gauge_avg_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name` asc, ts ASC",
"gauge_avg_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY `host.name` asc LIMIT 10) ORDER BY `host.name` asc, ts ASC",
}
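// For example, the full expected query for "delta_rate_sum/without_limits" is the
// delta_rate_sum base CTE followed immediately by
// " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC".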
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
@@ -227,15 +248,13 @@ func TestStatementBuilder(t *testing.T) {
t.Fatalf("failed to load field keys: %v", err)
}
mockMetadataStore.KeysMap = keys
// NOTE: LoadFieldKeysFromJSON doesn't set Materialized field
// for keys, so we have to set it manually here for testing
if _, ok := mockMetadataStore.KeysMap["materialized.key.name"]; ok {
if len(mockMetadataStore.KeysMap["materialized.key.name"]) > 0 {
mockMetadataStore.KeysMap["materialized.key.name"][0].Materialized = true
}
}
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
fl, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
if err != nil {
t.Fatalf("failed to create flagger: %v", err)
}
@@ -245,23 +264,30 @@ func TestStatementBuilder(t *testing.T) {
mockMetadataStore,
fm,
cb,
flagger,
fl,
)
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
for _, b := range bases {
for _, v := range variants {
name := b.name + "/" + v.name
t.Run(name, func(t *testing.T) {
q := b.query
q.Limit = v.limit
if v.hasOrder {
q.Order = []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: b.orderKey}},
Direction: qbtypes.OrderDirectionAsc,
},
}
}
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
result, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, qbtypes.RequestTypeTimeSeries, q, nil)
if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
require.Equal(t, b.cte+expectedFinalSelects[name], result.Query)
require.Equal(t, b.args, result.Args)
})
}
}
}

View File

@@ -111,6 +111,23 @@ func (b *traceQueryStatementBuilder) Build(
query = b.adjustKeys(ctx, keys, query, requestType)
// Check if filter contains trace_id(s) and optimize time range if needed
if query.Filter != nil && query.Filter.Expression != "" && b.telemetryStore != nil {
traceIDs, found := ExtractTraceIDsFromFilter(query.Filter.Expression)
if found && len(traceIDs) > 0 {
finder := NewTraceTimeRangeFinder(b.telemetryStore)
traceStart, traceEnd, ok := finder.GetTraceTimeRangeMulti(ctx, traceIDs)
if !ok {
b.logger.DebugContext(ctx, "failed to get trace time range", slog.Any("trace_ids", traceIDs))
} else if traceStart > 0 && traceEnd > 0 {
start = uint64(traceStart)
end = uint64(traceEnd)
b.logger.DebugContext(ctx, "optimized time range for traces", slog.Any("trace_ids", traceIDs), slog.Uint64("start", start), slog.Uint64("end", end))
}
}
}
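// Note: when the trace time range lookup succeeds, the requested window is replaced
// by the trace's own start/end, which limits the scan to the period that can
// actually contain the trace; the trace_id filter integration test exercises this,
// including the multi-bucket duplicate-span case.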
// Create SQL builder
q := sqlbuilder.NewSelectBuilder()

View File

@@ -65,10 +65,10 @@ type StorableRole struct {
types.Identifiable
types.TimeAuditable
Name string `bun:"name,type:string" json:"name"`
Description string `bun:"description,type:string" json:"description"`
Type string `bun:"type,type:string" json:"type"`
OrgID string `bun:"org_id,type:string" json:"orgId"`
Name string `bun:"name,type:string"`
Description string `bun:"description,type:string"`
Type string `bun:"type,type:string"`
OrgID string `bun:"org_id,type:string"`
}
type Role struct {

View File

@@ -5,14 +5,13 @@ import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
var (
ErrCodeUserRoleAlreadyExists = errors.MustNewCode("user_role_already_exists")
ErrCodeUserRolesNotFound = errors.MustNewCode("user_roles_not_found")
ErrCodeUserRolesNotFound = errors.MustNewCode("user_roles_not_found")
)
type UserRole struct {
@@ -48,11 +47,6 @@ func NewUserRoles(userID valuer.UUID, roles []*Role) []*UserRole {
return userRoles
}
type UserWithRoles struct {
*types.User
UserRoles []*UserRole `json:"userRoles"`
}
type UserRoleStore interface {
// create user roles in bulk
CreateUserRoles(ctx context.Context, userRoles []*UserRole) error
@@ -65,7 +59,4 @@ type UserRoleStore interface {
// delete user role entries by user id
DeleteUserRoles(ctx context.Context, userID valuer.UUID) error
// delete a single user role entry by user id and role id
DeleteUserRoleByUserIDAndRoleID(ctx context.Context, userID valuer.UUID, roleID valuer.UUID) error
}

View File

@@ -10,35 +10,34 @@ import (
type Account struct {
types.Identifiable
types.TimeAuditable
ProviderAccountID *string `json:"providerAccountId" required:"true" nullable:"true"`
Provider CloudProviderType `json:"provider" required:"true"`
RemovedAt *time.Time `json:"removedAt" required:"true" nullable:"true"`
AgentReport *AgentReport `json:"agentReport" required:"true" nullable:"true"`
OrgID valuer.UUID `json:"orgId" required:"true"`
Config *AccountConfig `json:"config" required:"true" nullable:"false"`
ProviderAccountId *string `json:"providerAccountID,omitempty"`
Provider CloudProviderType `json:"provider"`
RemovedAt *time.Time `json:"removedAt,omitempty"`
AgentReport *AgentReport `json:"agentReport,omitempty"`
OrgID valuer.UUID `json:"orgID"`
Config *AccountConfig `json:"config,omitempty"`
}
// AgentReport represents heartbeats sent by the agent.
type AgentReport struct {
TimestampMillis int64 `json:"timestampMillis" required:"true"`
Data map[string]any `json:"data" required:"true" nullable:"true"`
}
type AccountConfig struct {
// required till new providers are added
AWS *AWSAccountConfig `json:"aws" required:"true" nullable:"false"`
TimestampMillis int64 `json:"timestampMillis"`
Data map[string]any `json:"data"`
}
type GettableAccounts struct {
Accounts []*Account `json:"accounts" required:"true" nullable:"false"`
Accounts []*Account `json:"accounts"`
}
type GettableAccount = Account
type UpdatableAccount struct {
Config *AccountConfig `json:"config" required:"true" nullable:"false"`
Config *AccountConfig `json:"config"`
}
type AccountConfig struct {
AWS *AWSAccountConfig `json:"aws,omitempty"`
}
type AWSAccountConfig struct {
Regions []string `json:"regions" required:"true" nullable:"false"`
Regions []string `json:"regions"`
}

View File

@@ -1,81 +1,88 @@
package cloudintegrationtypes
import (
"time"
"github.com/SigNoz/signoz/pkg/valuer"
)
import "github.com/SigNoz/signoz/pkg/types/integrationtypes"
type ConnectionArtifactRequest struct {
// required till new providers are added
Aws *AWSConnectionArtifactRequest `json:"aws" required:"true" nullable:"false"`
Aws *AWSConnectionArtifactRequest `json:"aws"`
}
type AWSConnectionArtifactRequest struct {
DeploymentRegion string `json:"deploymentRegion" required:"true"`
Regions []string `json:"regions" required:"true" nullable:"false"`
DeploymentRegion string `json:"deploymentRegion"`
Regions []string `json:"regions"`
}
type PostableConnectionArtifact = ConnectionArtifactRequest
type ConnectionArtifact struct {
// required till new providers are added
Aws *AWSConnectionArtifact `json:"aws" required:"true" nullable:"false"`
Aws *AWSConnectionArtifact `json:"aws"`
}
type AWSConnectionArtifact struct {
ConnectionURL string `json:"connectionURL" required:"true"`
ConnectionUrl string `json:"connectionURL"`
}
type GettableAccountWithArtifact struct {
ID valuer.UUID `json:"id" required:"true"`
Artifact *ConnectionArtifact `json:"connectionArtifact" required:"true"`
type GettableConnectionArtifact = ConnectionArtifact
type AccountStatus struct {
Id string `json:"id"`
ProviderAccountId *string `json:"providerAccountID,omitempty"`
Status integrationtypes.AccountStatus `json:"status"`
}
type GettableAccountStatus = AccountStatus
type AgentCheckInRequest struct {
ProviderAccountID string `json:"providerAccountId" required:"false"`
CloudIntegrationID string `json:"cloudIntegrationId" required:"false"`
// older backward compatible fields are mapped to new fields
// CloudIntegrationId string `json:"cloudIntegrationId"`
// AccountId string `json:"accountId"`
Data map[string]any `json:"data" required:"true" nullable:"true"`
// New fields
ProviderAccountId string `json:"providerAccountId"`
CloudAccountId string `json:"cloudAccountId"`
Data map[string]any `json:"data,omitempty"`
}
type PostableAgentCheckInRequest struct {
AgentCheckInRequest
// the following are backward compatible fields for older running agents,
// which get mapped to the new fields in AgentCheckInRequest
ID string `json:"account_id" required:"false"` // => CloudIntegrationID
AccountID string `json:"cloud_account_id" required:"false"` // => ProviderAccountID
}
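// Illustrative check-in payload from an older agent (all values are hypothetical):
//
//	{"account_id": "0196aa1e", "cloud_account_id": "123456789012", "data": {}}
//
// account_id maps to CloudIntegrationID and cloud_account_id maps to
// ProviderAccountID before the request is processed further.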
type AgentCheckInResponse struct {
CloudIntegrationID string `json:"cloudIntegrationId" required:"true"`
ProviderAccountID string `json:"providerAccountId" required:"true"`
IntegrationConfig *ProviderIntegrationConfig `json:"integrationConfig" required:"true"`
RemovedAt *time.Time `json:"removedAt" required:"true" nullable:"true"`
CloudIntegrationId string `json:"cloud_integration_id"`
CloudAccountId string `json:"cloud_account_id"`
}
type GettableAgentCheckInResponse struct {
// Older fields for backward compatibility with existing AWS agents
AccountID string `json:"account_id" required:"true"`
CloudAccountID string `json:"cloud_account_id" required:"true"`
OlderIntegrationConfig *IntegrationConfig `json:"integration_config" required:"true" nullable:"true"`
OlderRemovedAt *time.Time `json:"removed_at" required:"true" nullable:"true"`
AgentCheckInResponse
// For backward compatibility
CloudIntegrationId string `json:"cloud_integration_id"`
AccountId string `json:"account_id"`
}
type AgentCheckInResponse struct {
// Older fields for backward compatibility are mapped to new fields below
// CloudIntegrationId string `json:"cloud_integration_id"`
// AccountId string `json:"account_id"`
// New fields
ProviderAccountId string `json:"providerAccountId"`
CloudAccountId string `json:"cloudAccountId"`
// IntegrationConfig carries the integration data an agent needs in order to
// start collecting telemetry data.
// The JSON key is kept snake_case for backward compatibility.
IntegrationConfig *IntegrationConfig `json:"integration_config,omitempty"`
}
// IntegrationConfig is the older integration config struct, kept for backward compatibility;
// it will eventually be removed once agents are updated to use the new struct.
type IntegrationConfig struct {
EnabledRegions []string `json:"enabled_regions" required:"true" nullable:"false"` // backward compatible
Telemetry *AWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"` // backward compatible
}
EnabledRegions []string `json:"enabledRegions"` // backward compatible
Telemetry *AWSCollectionStrategy `json:"telemetry,omitempty"` // backward compatible
type ProviderIntegrationConfig struct {
AWS *AWSIntegrationConfig `json:"aws" required:"true" nullable:"false"`
// new fields
AWS *AWSIntegrationConfig `json:"aws,omitempty"`
}
type AWSIntegrationConfig struct {
EnabledRegions []string `json:"enabledRegions" required:"true" nullable:"false"`
Telemetry *AWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"`
EnabledRegions []string `json:"enabledRegions"`
Telemetry *AWSCollectionStrategy `json:"telemetry,omitempty"`
}

View File

@@ -10,19 +10,20 @@ import (
"github.com/SigNoz/signoz/pkg/valuer"
)
var ErrCodeInvalidServiceID = errors.MustNewCode("invalid_service_id")
var (
S3Sync = valuer.NewString("s3sync")
// ErrCodeInvalidServiceID is the error code for invalid service id.
ErrCodeInvalidServiceID = errors.MustNewCode("invalid_service_id")
)
type ServiceID struct{ valuer.String }
type CloudIntegrationService struct {
types.Identifiable
types.TimeAuditable
Type ServiceID `json:"type"`
Config *ServiceConfig `json:"config"`
CloudIntegrationID valuer.UUID `json:"cloudIntegrationId"`
}
type ServiceConfig struct {
// required till new providers are added
AWS *AWSServiceConfig `json:"aws" required:"true" nullable:"false"`
CloudIntegrationID valuer.UUID `json:"cloudIntegrationID"`
}
// ServiceMetadata helps to quickly list the available services and whether each one is enabled.
@@ -31,56 +32,26 @@ type ServiceConfig struct {
type ServiceMetadata struct {
ServiceDefinitionMetadata
// if the service is enabled for the account
Enabled bool `json:"enabled" required:"true"`
}
// ServiceDefinitionMetadata represents service definition metadata. This is useful for showing service tab in frontend.
type ServiceDefinitionMetadata struct {
ID string `json:"id" required:"true"`
Title string `json:"title" required:"true"`
Icon string `json:"icon" required:"true"`
Enabled bool `json:"enabled"`
}
type GettableServicesMetadata struct {
Services []*ServiceMetadata `json:"services" required:"true" nullable:"false"`
Services []*ServiceMetadata `json:"services"`
}
type Service struct {
ServiceDefinition
ServiceConfig *ServiceConfig `json:"serviceConfig" required:"false" nullable:"false"`
ServiceConfig *ServiceConfig `json:"serviceConfig"`
}
type GettableService = Service
type UpdatableService struct {
Config *ServiceConfig `json:"config" required:"true" nullable:"false"`
Config *ServiceConfig `json:"config"`
}
type ServiceDefinition struct {
ServiceDefinitionMetadata
Overview string `json:"overview" required:"true"` // markdown
Assets Assets `json:"assets" required:"true"`
SupportedSignals SupportedSignals `json:"supported_signals" required:"true"`
DataCollected DataCollected `json:"dataCollected" required:"true"`
Strategy *CollectionStrategy `json:"telemetryCollectionStrategy" required:"true" nullable:"false"`
}
// SupportedSignals for cloud provider's service.
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
// DataCollected is curated static list of metrics and logs, this is shown as part of service overview.
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
// CollectionStrategy is cloud provider specific configuration for signal collection,
// this is used by agent to understand the nitty-gritty for collecting telemetry for the cloud provider.
type CollectionStrategy struct {
AWS *AWSCollectionStrategy `json:"aws" required:"true" nullable:"false"`
type ServiceConfig struct {
AWS *AWSServiceConfig `json:"aws,omitempty"`
}
type AWSServiceConfig struct {
@@ -99,11 +70,45 @@ type AWSServiceMetricsConfig struct {
Enabled bool `json:"enabled"`
}
// ServiceDefinitionMetadata represents service definition metadata. This is useful for showing the service tab in the frontend.
type ServiceDefinitionMetadata struct {
Id string `json:"id"`
Title string `json:"title"`
Icon string `json:"icon"`
}
type ServiceDefinition struct {
ServiceDefinitionMetadata
Overview string `json:"overview"` // markdown
Assets Assets `json:"assets"`
SupportedSignals SupportedSignals `json:"supported_signals"`
DataCollected DataCollected `json:"dataCollected"`
Strategy *CollectionStrategy `json:"telemetryCollectionStrategy"`
}
// CollectionStrategy is cloud-provider-specific configuration for signal collection;
// the agent uses it to understand the nitty-gritty of collecting telemetry for that provider.
type CollectionStrategy struct {
AWS *AWSCollectionStrategy `json:"aws,omitempty"`
}
// Assets represents the collection of dashboards.
type Assets struct {
Dashboards []Dashboard `json:"dashboards"`
}
// SupportedSignals for cloud provider's service.
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
// DataCollected is a curated, static list of metrics and logs; it is shown as part of the service overview.
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
// CollectedLogAttribute represents a log attribute present in all log entries for a service;
// it is shown as part of the service overview.
type CollectedLogAttribute struct {
@@ -164,23 +169,56 @@ type AWSLogsStrategy struct {
// This is used to show available pre-made dashboards for a service,
// hence has additional fields like id, title and description
type Dashboard struct {
ID string `json:"id"`
Id string `json:"id"`
Title string `json:"title"`
Description string `json:"description"`
Definition dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
// SupportedServices is the map of supported services for each cloud provider.
var SupportedServices = map[CloudProviderType][]ServiceID{
CloudProviderTypeAWS: {
{valuer.NewString("alb")},
{valuer.NewString("api-gateway")},
{valuer.NewString("dynamodb")},
{valuer.NewString("ec2")},
{valuer.NewString("ecs")},
{valuer.NewString("eks")},
{valuer.NewString("elasticache")},
{valuer.NewString("lambda")},
{valuer.NewString("msk")},
{valuer.NewString("rds")},
{valuer.NewString("s3sync")},
{valuer.NewString("sns")},
{valuer.NewString("sqs")},
},
}
// NewServiceID returns a new ServiceID from a string, validated against the supported services for the given cloud provider.
func NewServiceID(provider CloudProviderType, service string) (ServiceID, error) {
services, ok := SupportedServices[provider]
if !ok {
return ServiceID{}, errors.NewInvalidInputf(ErrCodeInvalidServiceID, "no services defined for cloud provider: %s", provider)
}
for _, s := range services {
if s.StringValue() == service {
return s, nil
}
}
return ServiceID{}, errors.NewInvalidInputf(ErrCodeInvalidServiceID, "invalid service id %q for cloud provider %s", service, provider)
}
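// For example (illustrative): NewServiceID(CloudProviderTypeAWS, "rds") returns the
// rds ServiceID, while NewServiceID(CloudProviderTypeAWS, "not-a-service") returns an
// invalid-input error carrying ErrCodeInvalidServiceID.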
// UTILS
// GetCloudIntegrationDashboardID returns the dashboard id for a cloud integration, given the cloud provider, service id, and dashboard id.
// This is used to generate unique dashboard ids for cloud integration, and also to parse the dashboard id to get the cloud provider and service id when needed.
func GetCloudIntegrationDashboardID(cloudProvider CloudProviderType, svcID, dashboardID string) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcID, dashboardID)
func GetCloudIntegrationDashboardID(cloudProvider CloudProviderType, svcId, dashboardId string) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
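// For example, assuming the AWS provider formats as "aws", a call like
// GetCloudIntegrationDashboardID(CloudProviderTypeAWS, "rds", "overview") yields
// "cloud-integration--aws--rds--overview" (the service and dashboard ids are illustrative).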
// GetDashboardsFromAssets returns the list of dashboards for the cloud provider service from definition.
func GetDashboardsFromAssets(
svcID string,
svcId string,
orgID valuer.UUID,
cloudProvider CloudProviderType,
createdAt time.Time,
@@ -191,7 +229,7 @@ func GetDashboardsFromAssets(
for _, d := range assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
dashboards = append(dashboards, &dashboardtypes.Dashboard{
ID: GetCloudIntegrationDashboardID(cloudProvider, svcID, d.ID),
ID: GetCloudIntegrationDashboardID(cloudProvider, svcId, d.Id),
Locked: true,
OrgID: orgID,
Data: d.Definition,

View File

@@ -1,75 +0,0 @@
package cloudintegrationtypes
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
)
type ServiceID struct{ valuer.String }
var (
AWSServiceALB = ServiceID{valuer.NewString("alb")}
AWSServiceAPIGateway = ServiceID{valuer.NewString("api-gateway")}
AWSServiceDynamoDB = ServiceID{valuer.NewString("dynamodb")}
AWSServiceEC2 = ServiceID{valuer.NewString("ec2")}
AWSServiceECS = ServiceID{valuer.NewString("ecs")}
AWSServiceEKS = ServiceID{valuer.NewString("eks")}
AWSServiceElastiCache = ServiceID{valuer.NewString("elasticache")}
AWSServiceLambda = ServiceID{valuer.NewString("lambda")}
AWSServiceMSK = ServiceID{valuer.NewString("msk")}
AWSServiceRDS = ServiceID{valuer.NewString("rds")}
AWSServiceS3Sync = ServiceID{valuer.NewString("s3sync")}
AWSServiceSNS = ServiceID{valuer.NewString("sns")}
AWSServiceSQS = ServiceID{valuer.NewString("sqs")}
)
func (ServiceID) Enum() []any {
return []any{
AWSServiceALB,
AWSServiceAPIGateway,
AWSServiceDynamoDB,
AWSServiceEC2,
AWSServiceECS,
AWSServiceEKS,
AWSServiceElastiCache,
AWSServiceLambda,
AWSServiceMSK,
AWSServiceRDS,
AWSServiceS3Sync,
AWSServiceSNS,
AWSServiceSQS,
}
}
// SupportedServices is the map of supported services for each cloud provider.
var SupportedServices = map[CloudProviderType][]ServiceID{
CloudProviderTypeAWS: {
AWSServiceALB,
AWSServiceAPIGateway,
AWSServiceDynamoDB,
AWSServiceEC2,
AWSServiceECS,
AWSServiceEKS,
AWSServiceElastiCache,
AWSServiceLambda,
AWSServiceMSK,
AWSServiceRDS,
AWSServiceS3Sync,
AWSServiceSNS,
AWSServiceSQS,
},
}
// NewServiceID returns a new ServiceID from a string, validated against the supported services for the given cloud provider.
func NewServiceID(provider CloudProviderType, service string) (ServiceID, error) {
services, ok := SupportedServices[provider]
if !ok {
return ServiceID{}, errors.NewInvalidInputf(ErrCodeInvalidServiceID, "no services defined for cloud provider: %s", provider)
}
for _, s := range services {
if s.StringValue() == service {
return s, nil
}
}
return ServiceID{}, errors.NewInvalidInputf(ErrCodeInvalidServiceID, "invalid service id %q for cloud provider %s", service, provider)
}

View File

@@ -51,14 +51,6 @@ type DeprecatedUser struct {
Role Role `json:"role"`
}
type UpdatableUser struct {
DisplayName string `json:"displayName" required:"true"`
}
type PostableRole struct {
Name string `json:"name" required:"true"`
}
type PostableRegisterOrgAndAdmin struct {
Name string `json:"name"`
Email valuer.Email `json:"email"`
@@ -306,9 +298,6 @@ type UserStore interface {
// Get user by reset password token
GetUserByResetPasswordToken(ctx context.Context, token string) (*User, error)
// Get users having role by org id and role id
GetUsersByOrgIDAndRoleID(ctx context.Context, orgID valuer.UUID, roleID valuer.UUID) ([]*User, error)
// Transaction
RunInTx(ctx context.Context, cb func(ctx context.Context) error) error
}

View File

@@ -148,6 +148,8 @@ def build_builder_query(
step_interval: int = DEFAULT_STEP_INTERVAL,
group_by: Optional[List[str]] = None,
filter_expression: Optional[str] = None,
order_by: Optional[List[Dict]] = None,
limit: Optional[int] = None,
functions: Optional[List[Dict]] = None,
disabled: bool = False,
) -> Dict:
@@ -183,6 +185,12 @@ def build_builder_query(
if filter_expression:
spec["filter"] = {"expression": filter_expression}
if order_by:
spec["order"] = order_by
if limit is not None:
spec["limit"] = limit
if functions:
spec["functions"] = functions

View File

@@ -696,6 +696,7 @@ def test_traces_list_with_corrupt_data(
assert response.status_code == status_code
if response.status_code == HTTPStatus.OK:
if not results(traces):
# No results expected
assert response.json()["data"]["data"]["results"][0]["rows"] is None
@@ -2025,136 +2026,3 @@ def test_traces_fill_zero_formula_with_group_by(
expected_by_ts=expectations[service_name],
context=f"traces/fillZero/F1/{service_name}",
)
def test_traces_list_filter_by_trace_id(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
) -> None:
"""
Tests that filtering by trace_id:
1. Returns the matching span (narrow window, single bucket).
2. Does not return duplicate spans when the query window spans multiple
exponential buckets (>1 h)
3. Returns no results when the query window does not contain the trace.
"""
target_trace_id = TraceIdGenerator.trace_id()
other_trace_id = TraceIdGenerator.trace_id()
span_id_root = TraceIdGenerator.span_id()
other_span_id = TraceIdGenerator.span_id()
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
common_resources = {
"deployment.environment": "production",
"service.name": "trace-filter-service",
"cloud.provider": "integration",
}
insert_traces(
[
Traces(
timestamp=now - timedelta(seconds=10),
duration=timedelta(seconds=5),
trace_id=target_trace_id,
span_id=span_id_root,
parent_span_id="",
name="root-span",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources=common_resources,
attributes={"http.request.method": "GET"},
),
# span from a different trace — must not appear in results
Traces(
timestamp=now - timedelta(seconds=5),
duration=timedelta(seconds=1),
trace_id=other_trace_id,
span_id=other_span_id,
parent_span_id="",
name="other-root-span",
kind=TracesKind.SPAN_KIND_SERVER,
status_code=TracesStatusCode.STATUS_CODE_OK,
status_message="",
resources=common_resources,
attributes={},
),
]
)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
trace_filter = f"trace_id = '{target_trace_id}'"
def _query(start_ms: int, end_ms: int) -> List:
response = make_query_request(
signoz,
token,
start_ms=start_ms,
end_ms=end_ms,
request_type="raw",
queries=[
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"limit": 100,
"offset": 0,
"filter": {"expression": trace_filter},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
"selectFields": [
{
"name": "name",
"fieldDataType": "string",
"fieldContext": "span",
"signal": "traces",
}
],
"having": {"expression": ""},
"aggregations": [{"expression": "count()"}],
},
}
],
)
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
return response.json()["data"]["data"]["results"][0]["rows"] or []
now_ms = int(now.timestamp() * 1000)
# --- Test 1: narrow window (single bucket, <1 h) ---
narrow_start_ms = int((now - timedelta(minutes=5)).timestamp() * 1000)
narrow_rows = _query(narrow_start_ms, now_ms)
assert (
len(narrow_rows) == 1
), f"Expected 1 span for trace_id filter (narrow window), got {len(narrow_rows)}"
assert narrow_rows[0]["data"]["span_id"] == span_id_root
assert narrow_rows[0]["data"]["trace_id"] == target_trace_id
# --- Test 2: wide window (>1 h, triggers multiple exponential buckets) ---
# should just return 1 span, not duplicate
wide_start_ms = int((now - timedelta(hours=12)).timestamp() * 1000)
wide_rows = _query(wide_start_ms, now_ms)
assert len(wide_rows) == 1, (
f"Expected 1 span for trace_id filter (wide window, multi-bucket), "
f"got {len(wide_rows)} — possible duplicate-span regression"
)
assert wide_rows[0]["data"]["span_id"] == span_id_root
assert wide_rows[0]["data"]["trace_id"] == target_trace_id
# --- Test 3: window that does not contain the trace returns no results ---
past_start_ms = int((now - timedelta(hours=6)).timestamp() * 1000)
past_end_ms = int((now - timedelta(hours=2)).timestamp() * 1000)
past_rows = _query(past_start_ms, past_end_ms)
assert len(past_rows) == 0, (
f"Expected 0 spans for trace_id filter outside time window, "
f"got {len(past_rows)}"
)

View File

@@ -2,16 +2,20 @@
Look at the cumulative_counters_1h.jsonl file for the relevant data
"""
import logging
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List
from typing import Any, Callable, List, Optional, Union
import pytest
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -71,16 +75,200 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
def _assert_endpoint_rate_values(endpoint_values: dict) -> None:
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"metric_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,  # default ordering, equivalent to ordering by sum(metric_name) desc
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"only_limit",
None,  # default ordering, equivalent to ordering by sum(metric_name) desc
3,
3,
["/products", "/health", "/checkout"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
[build_order_by("sum(test_rate_groupby_asc_metric_name)", "asc")],
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_asc_metric_name_lim3)", "asc")],
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
[build_order_by("sum(test_rate_groupby_desc_metric_name)", "desc")],
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_desc_metric_name_lim3)", "desc")],
3,
3,
["/products", "/health", "/checkout"],
),
],
)
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
metric_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = "test_rate_groupby"
metric_name = f"test_rate_groupby_{metric_suffix}"
metrics = Metrics.load_from_file(
CUMULATIVE_COUNTERS_FILE,
@@ -97,6 +285,8 @@ def test_rate_group_by_endpoint(
"sum",
temporality="cumulative",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -105,10 +295,23 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Should have 5 different endpoints
assert (
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
# endpoint -> values
endpoint_values = {}
@@ -117,11 +320,6 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# at no point rate should be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -129,103 +327,4 @@ def test_rate_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"
_assert_endpoint_rate_values(endpoint_values)

View File

@@ -5,7 +5,7 @@ Look at the multi_temporality_counters_1h.jsonl file for the relevant data
import random
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from typing import Callable, List, Optional, Union
import pytest
@@ -14,6 +14,7 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -91,6 +92,198 @@ def test_with_steady_values_and_reset(
), f"{time_aggregation} should not be negative: {v['value']}"
def _assert_endpoint_group_values( # pylint: disable=too-many-arguments
endpoint_values: dict,
stable_health_value: float,
stable_products_value: float,
stable_checkout_value: float,
spike_checkout_value: float,
stable_orders_value: float,
spike_users_value: float,
time_aggregation: str,
) -> None:
# /health: 60 data points (t01-t60), steady +10/min
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(
1 for v in health_values if v["value"] == stable_health_value
)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
# all /health rates should be stable except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert (
v["value"] == stable_health_value
), f"Expected /health rate {stable_health_value}, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(
1 for v in products_values if v["value"] == stable_products_value
)
# most values should be stable, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
# check that non-stable values are due to gap averaging (should be lower)
gap_boundary_values = [
v["value"] for v in products_values if v["value"] != stable_products_value
]
for val in gap_boundary_values:
assert (
0 < val < stable_products_value
), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(
1 for v in checkout_values if v["value"] == stable_checkout_value
)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(
1 for v in checkout_values if v["value"] == spike_checkout_value
)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(
1 for v in orders_values if v["value"] == stable_orders_value
)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [
v["value"] for v in orders_values if v["value"] != stable_orders_value
]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
# non-zero values should be stable increment rate
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"order_suffix,order_by_spec,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"asc",
("endpoint", "asc"),
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
("endpoint", "asc"),
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
("endpoint", "desc"),
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
("endpoint", "desc"),
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
("sum_metric", "asc"),
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
("sum_metric", "asc"),
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
("sum_metric", "desc"),
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
("sum_metric", "desc"),
3,
3,
["/products", "/health", "/checkout"],
),
],
)
@pytest.mark.parametrize(
"time_aggregation, stable_health_value, stable_products_value, stable_checkout_value, spike_checkout_value, stable_orders_value, spike_users_value",
[
@@ -110,11 +303,24 @@ def test_group_by_endpoint(
spike_checkout_value: float,
stable_orders_value: float,
spike_users_value: float,
order_suffix: str,
order_by_spec: Optional[tuple],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_{time_aggregation}_groupby"
metric_name = f"test_{time_aggregation}_groupby_{order_suffix}"
# Build order_by at runtime so metric name reflects actual time_aggregation
order_by = None
if order_by_spec is not None:
key, direction = order_by_spec
if key == "sum_metric":
key = f"sum({metric_name})"
order_by = [build_order_by(key, direction)]
metrics = Metrics.load_from_file(
MULTI_TEMPORALITY_FILE,
@@ -130,6 +336,8 @@ def test_group_by_endpoint(
time_aggregation,
"sum",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -137,10 +345,23 @@ def test_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Should have 5 different endpoints
assert (
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
# endpoint -> values
endpoint_values = {}
@@ -149,11 +370,6 @@ def test_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# at no point rate should be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -161,117 +377,16 @@ def test_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"
# /health: 60 data points (t01-t60), steady +10/min
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(
1 for v in health_values if v["value"] == stable_health_value
_assert_endpoint_group_values(
endpoint_values,
stable_health_value,
stable_products_value,
stable_checkout_value,
spike_checkout_value,
stable_orders_value,
spike_users_value,
time_aggregation,
)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
# all /health rates should be state except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert (
v["value"] == stable_health_value
), f"Expected /health rate {stable_health_value}, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(
1 for v in products_values if v["value"] == stable_products_value
)
# most values should be stable, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
# check that non-stable values are due to gap averaging (should be lower)
gap_boundary_values = [
v["value"] for v in products_values if v["value"] != stable_products_value
]
for val in gap_boundary_values:
assert (
0 < val < stable_products_value
), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(
1 for v in checkout_values if v["value"] == stable_checkout_value
)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(
1 for v in checkout_values if v["value"] == spike_checkout_value
)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(
1 for v in orders_values if v["value"] == stable_orders_value
)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [
v["value"] for v in orders_values if v["value"] != stable_orders_value
]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(

View File

@@ -4,7 +4,7 @@ Look at the histogram_data_1h.jsonl file for the relevant data
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from typing import Callable, List, Optional, Union
import pytest
@@ -13,13 +13,16 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
)
from fixtures.utils import get_testdata_file_path
FILE = get_testdata_file_path("histogram_data_1h.jsonl")
FILE_WITH_MANY_GROUPS = get_testdata_file_path("histogram_data_1h_many_groups.jsonl")
@pytest.mark.parametrize(
@@ -525,3 +528,563 @@ def test_histogram_percentile_for_delta_service(
assert result_values[0]["value"] == zeroth_value
assert result_values[1]["value"] == first_value
assert result_values[-1]["value"] == last_value
def _assert_series_endpoint_labels(
series: list,
expected_endpoints: Union[set, List[str]],
prefix: str,
) -> None:
labels = [s.get("labels", [{}])[0].get("value", "unknown") for s in series]
if isinstance(expected_endpoints, set):
assert (
set(labels) == expected_endpoints
), f"Expected {prefix} endpoints {expected_endpoints}, got {set(labels)}"
else:
assert labels == expected_endpoints, (
f"Expected {prefix} endpoints in order {expected_endpoints}, got {labels}"
)
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc_lim2",
[build_order_by("endpoint", "asc")],
2,
2,
["/checkout", "/health"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
3,
["/orders", "/health", "/checkout"],
),
(
"desc_lim2",
[build_order_by("endpoint", "desc")],
2,
2,
["/orders", "/health"],
),
(
"asc_metric_name",
[build_order_by("count(test_histogram_count_groupby_asc_metric_name)", "asc")],
None,
3,
["/health", "/orders", "/checkout"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"asc_metric_name_lim2",
[build_order_by("count(test_histogram_count_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["/health", "/orders"],
),
(
"desc_metric_name",
[build_order_by("count(test_histogram_count_groupby_desc_metric_name)", "desc")],
None,
3,
["/checkout", "/health", "/orders"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"desc_metric_name_lim2",
[build_order_by("count(test_histogram_count_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
["/checkout", "/health"],
),
],
)
def test_histogram_count_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_count_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_count = build_builder_query(
"A",
metric_name,
"increase",
"count",
comparisonSpaceAggregationParam={"threshold": 1000, "operator": "<="},
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_count])
assert response.status_code == HTTPStatus.OK
data = response.json()
count_all_series = get_all_series(data, "A")
assert (
len(count_all_series) == expected_count
), f"Expected {expected_count} series, got {len(count_all_series)}"
_assert_series_endpoint_labels(count_all_series, expected_endpoints, "count")
count_values = {}
for series in count_all_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
count_values[endpoint] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in count_values.items():
for v in values:
assert v["value"] >= 0, f"Count for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api): 59 points, increase starts at 11/min → 69/min
if "/health" in count_values:
vals = count_values["/health"]
assert vals[0]["value"] == 11, f"Expected /health count first=11, got {vals[0]['value']}"
assert vals[-1]["value"] == 69, f"Expected /health count last=69, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders" in count_values:
vals = count_values["/orders"]
assert vals[0]["value"] == 11, f"Expected /orders count first=11, got {vals[0]['value']}"
assert vals[-1]["value"] == 69, f"Expected /orders count last=69, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points, zeroth=12345 (raw delta), then 11/min → 69/min
if "/checkout" in count_values:
vals = count_values["/checkout"]
assert vals[0]["value"] == 12345, f"Expected /checkout count zeroth=12345, got {vals[0]['value']}"
assert vals[1]["value"] == 11, f"Expected /checkout count first=11, got {vals[1]['value']}"
assert vals[-1]["value"] == 69, f"Expected /checkout count last=69, got {vals[-1]['value']}"
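
The endpoint tiebreak referenced in the parametrize comments can be read as a two-level sort: first by the aggregated value, then by the group-by key. A sketch with illustrative totals (not the actual ClickHouse ordering):

# hypothetical per-endpoint count totals; /health and /orders share the same distribution
totals = {"/checkout": 14745, "/health": 2400, "/orders": 2400}

asc = sorted(totals, key=lambda e: (totals[e], e))
desc = sorted(totals, key=lambda e: (-totals[e], e))
print(asc)   # ['/health', '/orders', '/checkout']  - equal totals fall back to endpoint asc
print(desc)  # ['/checkout', '/health', '/orders']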
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
4,
[ "/checkout", "/health", "/orders", "/coupon"],
),
(
"only_limit",
None,
1,
1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
4,
[ "/checkout", "/coupon", "/health", "/orders"],
),
(
"asc_lim2",
[build_order_by("endpoint", "asc")],
2,
2,
["/checkout", "/coupon"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
4,
["/orders", "/health", "/coupon", "/checkout"],
),
(
"desc_lim2",
[build_order_by("endpoint", "desc")],
2,
2,
["/orders", "/health"],
),
(
"asc_metric_name",
[build_order_by("p75(test_histogram_p75_groupby_asc_metric_name)", "asc")],
None,
4,
["/coupon", "/orders", "/checkout", "/health"], ## health and checkout have the same size so they are then sorted endpoint as a tiebreaker
),
(
"asc_metric_name_lim2",
[build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["/coupon", "/orders"],
),
(
"asc_metric_name_lim3",
[build_order_by("p75(test_histogram_p75_groupby_asc_metric_name_lim3)", "asc")],
3,
3,
["/coupon", "/orders", "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
),
(
"desc_metric_name",
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name)", "desc")],
None,
4,
[ "/checkout", "/health", "/orders", "/coupon"],
),
(
"desc_metric_name_lim2",
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
[ "/checkout", "/health"],
),
(
"desc_metric_name_lim2",
[build_order_by("p75(test_histogram_p75_groupby_desc_metric_name_lim2)", "desc")],
1,
1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
),
],
)
def test_histogram_percentile_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_p75_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE_WITH_MANY_GROUPS,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_p75 = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p75",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
assert response.status_code == HTTPStatus.OK
data = response.json()
p75_series = get_all_series(data, "A")
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
assert (
len(p75_series) == expected_count
), f"Expected {expected_count} p75 series, got {len(p75_series)}"
_assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")
p75_values = {}
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
p75_values[endpoint] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in p75_values.items():
for v in values:
assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api)
if "/health" in p75_values:
vals = p75_values["/health"]
assert vals[0]["value"] == 6000, f"Expected /health p75 first=6000, got {vals[0]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /health p75 last=6000, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders" in p75_values:
vals = p75_values["/orders"]
assert vals[0]["value"] == 4500, f"Expected /orders p75 first=4500, got {vals[0]['value']}"
assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=4500, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points
if "/checkout" in p75_values:
vals = p75_values["/checkout"]
assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=6000, got {vals[0]['value']}"
assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6000, got {vals[1]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=6000, got {vals[-1]['value']}"
# /coupon (delta, service=web): 60 points
if "/coupon" in p75_values:
vals = p75_values["/coupon"]
assert vals[0]["value"] == 1125, f"Expected /coupon p75 zeroth=1125, got {vals[0]['value']}"
assert vals[1]["value"] == 1125, f"Expected /coupon p75 first=1125, got {vals[1]['value']}"
assert vals[-1]["value"] == 1125, f"Expected /coupon p75 last=1125, got {vals[-1]['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints, expected_status_codes",
[
(
"no_order",
None,
None,
5,
[ "/checkout", "/health", "/orders", "/coupon", "/coupon"], ## coupon has 200 and 500 status codes so it will appear twice
[ "200", "200", "200", "200", "500"],
),
(
"only_limit",
None,
1,
1,
[ "/checkout"], ##health and checkout have the same size so they are then sorted endpoint as a tiebreaker, and only checkout makes the limit
[ "200"]
),
(
"asc_endpoint",
[build_order_by("endpoint", "asc")],
None,
5,
[ "/checkout", "/coupon", "/coupon", "/health", "/orders"],
[ "200", "200", "500", "200", "200"],
),
(
"asc_endpoint_status_code",
[build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
None,
5,
[ "/checkout", "/coupon", "/coupon", "/health", "/orders"],
[ "200", "200", "500", "200", "200"],
),
(
"asc_status_code_endpoint",
[build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
None,
5,
[ "/checkout", "/coupon", "/health", "/orders", "/coupon"],
[ "200", "200", "200", "200", "500"],
),
(
"asc_endpoint_limit_2",
[build_order_by("endpoint", "asc")],
2,
2,
[ "/checkout", "/coupon"],
[ "200", "200"],
),
(
"asc_endpoint_status_code_limit_2",
[build_order_by("endpoint", "asc"), build_order_by("status_code", "asc")],
2,
2,
[ "/checkout", "/coupon"],
[ "200", "200"],
),
(
"asc_status_code_endpoint_limit_4",
[build_order_by("status_code", "asc"), build_order_by("endpoint", "asc")],
4,
4,
[ "/checkout", "/coupon", "/health", "/orders"],
[ "200", "200", "200", "200"],
),
(
"desc_endpoint",
[build_order_by("endpoint", "desc")],
None,
5,
["/orders", "/health", "/coupon", "/coupon", "/checkout"],
[ "200", "200", "200", "500", "200"],
),
(
"desc_endpoint_status_code",
[build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
None,
5,
["/orders", "/health", "/coupon", "/coupon", "/checkout"],
[ "200", "200", "500", "200", "200"],
),
(
"desc_status_code_endpoint",
[build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
None,
5,
["/coupon", "/orders", "/health", "/coupon", "/checkout"],
[ "500", "200", "200", "200", "200"],
),
(
"desc_endpoint_limit2",
[build_order_by("endpoint", "desc")],
3,
3,
["/orders", "/health", "/coupon"],
[ "200", "200", "200"],
),
(
"desc_endpoint_status_code_limit3",
[build_order_by("endpoint", "desc"), build_order_by("status_code", "desc")],
3,
3,
["/orders", "/health", "/coupon"],
[ "200", "200", "500"],
),
(
"desc_status_code_endpoint_limit2",
[build_order_by("status_code", "desc"), build_order_by("endpoint", "desc")],
2,
2,
["/coupon", "/orders"],
[ "500", "200"],
),
],
)
def test_histogram_percentile_group_by_endpoint_and_status_code(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
expected_status_codes: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_p75_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE_WITH_MANY_GROUPS,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_p75 = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p75",
group_by=["endpoint", "status_code"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_p75])
assert response.status_code == HTTPStatus.OK
data = response.json()
p75_series = get_all_series(data, "A")
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
status_code = series.get("labels", [{}])[1].get("value", "unknown")
assert (
len(p75_series) == expected_count
), f"Expected {expected_count} p75 series, got {len(p75_series)}"
_assert_series_endpoint_labels(p75_series, expected_endpoints, "p75")
endpoints = [s.get("labels", [{}])[0].get("value", "unknown") for s in p75_series]
assert endpoints == expected_endpoints, (
f"Expected p75 endpoints in order {expected_endpoints}, got {endpoints}"
)
status_codes = [s.get("labels", [{}])[1].get("value", "unknown") for s in p75_series]
assert status_codes == expected_status_codes, (
f"Expected p75 endpoints in order {expected_status_codes}, got {status_codes}"
)
p75_values = {}
for series in p75_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
status_code = series.get("labels", [{}])[1].get("value", "unknown")
p75_values[endpoint+status_code] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in p75_values.items():
for v in values:
assert v["value"] >= 0, f"p75 for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api)
if "/health200" in p75_values:
vals = p75_values["/health200"]
assert vals[0]["value"] == 6000, f"Expected /health p75 first=6000, got {vals[0]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /health p75 last=6000, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders200" in p75_values:
vals = p75_values["/orders200"]
assert vals[0]["value"] == 4500, f"Expected /orders p75 first=4500, got {vals[0]['value']}"
assert vals[-1]["value"] == 4500, f"Expected /orders p75 last=4500, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points
if "/checkout200" in p75_values:
vals = p75_values["/checkout200"]
assert vals[0]["value"] == 6000, f"Expected /checkout p75 zeroth=6000, got {vals[0]['value']}"
assert vals[1]["value"] == 6000, f"Expected /checkout p75 first=6000, got {vals[1]['value']}"
assert vals[-1]["value"] == 6000, f"Expected /checkout p75 last=6000, got {vals[-1]['value']}"
# /coupon (delta, service=web): 60 points
if "/coupon200" in p75_values:
vals = p75_values["/coupon200"]
assert vals[0]["value"] == 1250, f"Expected /coupon200 p75 zeroth=1250, got {vals[0]['value']}"
assert vals[1]["value"] == 1250, f"Expected /coupon200 p75 first=1250, got {vals[1]['value']}"
assert vals[-1]["value"] == 1250, f"Expected /coupon200 p75 last=1250, got {vals[-1]['value']}"
if "/coupon500" in p75_values:
vals = p75_values["/coupon500"]
assert vals[0]["value"] == 750, f"Expected /coupon500 p75 zeroth=750, got {vals[0]['value']}"
assert vals[1]["value"] == 750, f"Expected /coupon500 p75 first=750, got {vals[1]['value']}"
assert vals[-1]["value"] == 750, f"Expected /coupon500 p75 last=750, got {vals[-1]['value']}"

View File

@@ -1,6 +1,6 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from typing import Callable, List, Optional, Union
import pytest
@@ -9,6 +9,8 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
)
@@ -139,3 +141,137 @@ def test_for_multiple_aggregations(
assert result_values[19]["value"] == twentieth_min_val
assert result_values[20]["value"] == twenty_first_min_val
assert result_values[30]["value"] == thirty_first_min_val
@pytest.mark.parametrize(
"metric_suffix,order_by,limit,expected_count,expected_services",
[
(
"no_order",
None, # default ordering: desc by avg of all metric values for a group
None,
3,
["lab", "web", "api"], # sum of all values: lab=42000, api=36000, web=34000. avg of all sums: lab=700, api=600, web=680
),
(
"only_limit",
None,
2,
2,
["lab", "web"], # top 2 by default desc: lab=42000, api=36000
),
(
"asc",
[build_order_by("service", "asc")],
None,
3,
["api", "lab", "web"],
),
(
"asc_lim2",
[build_order_by("service", "asc")],
2,
2,
["api", "lab"],
),
(
"desc",
[build_order_by("service", "desc")],
None,
3,
["web", "lab", "api"],
),
(
"desc_lim2",
[build_order_by("service", "desc")],
2,
2,
["web", "lab"],
),
(
"asc_metric_name",
[build_order_by("sum(test_gauge_groupby_asc_metric_name)", "asc")],
None,
3,
["api", "web", "lab"],
),
(
"asc_metric_name_lim2",
[build_order_by("sum(test_gauge_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["api", "web"],
),
(
"desc_metric_name",
[build_order_by("sum(test_gauge_groupby_desc_metric_name)", "desc")],
None,
3,
["lab", "web", "api"],
),
(
"desc_metric_name_lim2",
[build_order_by("sum(test_gauge_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
["lab", "web"],
),
],
)
def test_gauge_group_by_service(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
metric_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_services: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_gauge_groupby_{metric_suffix}"
metrics = Metrics.load_from_file(
FILE,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query = build_builder_query(
"A",
metric_name,
"max",
"sum",
group_by=["service"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
assert response.status_code == HTTPStatus.OK
data = response.json()
all_series = get_all_series(data, "A")
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
service_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_services, set):
assert (
set(service_labels) == expected_services
), f"Expected services {expected_services}, got {set(service_labels)}"
else:
assert service_labels == expected_services, (
f"Expected services {expected_services}, got {service_labels}"
)
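
With no explicit order, the series here come back sorted by each group's average value over the window, descending. A sketch using the per-group averages quoted in the parametrize comments:

avg_per_group = {"lab": 700.0, "web": 680.0, "api": 600.0}  # from the comments above

default_order = sorted(avg_per_group, key=avg_per_group.get, reverse=True)
print(default_order)      # ['lab', 'web', 'api'] - the no_order expectation
print(default_order[:2])  # ['lab', 'web']        - the only_limit expectation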

View File

@@ -5,13 +5,16 @@ Look at the delta_counters_1h.jsonl file for the relevant data
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List
from typing import Any, Callable, List, Optional, Union
import pytest
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -69,16 +72,61 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
5,
{"/products", "/health", "/checkout", "/orders", "/users"},
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
],
)
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = "test_rate_groupby"
metric_name = f"test_rate_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
DELTA_COUNTERS_FILE,
@@ -94,6 +142,8 @@ def test_rate_group_by_endpoint(
"rate",
"sum",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -102,10 +152,23 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Should have 5 different endpoints
assert (
len(all_series) == 5
), f"Expected 5 series for 5 endpoints, got {len(all_series)}"
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
# endpoint -> values
endpoint_values = {}
@@ -114,11 +177,6 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# at no point rate should be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -128,93 +186,95 @@ def test_rate_group_by_endpoint(
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
health_values = endpoint_values["/health"]
assert (
len(health_values) == 60
), f"Expected 60 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health == 60
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) == 60
), f"Expected 60 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health == 60
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
products_values = endpoint_values["/products"]
assert (
len(products_values) == 51
), f"Expected 51 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
assert (
count_steady_products == 51
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) == 51
), f"Expected 51 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
assert (
count_steady_products == 51
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) == 61
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout == 56
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout == 5
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
len(checkout_values) == 61
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout == 56
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout == 5
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) == 60
), f"Expected 59 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders == 58
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) == 2
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) == 1
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) == 60
), f"Expected 60 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders == 58
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) == 2
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) == 1
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes (12 of them)
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
users_values = endpoint_values["/users"]
assert (
len(users_values) == 56
), f"Expected 56 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users == 44
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate == 12
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) == 56
), f"Expected 56 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users == 44
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate == 12
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"

File diff suppressed because it is too large