mirror of https://github.com/SigNoz/signoz.git (synced 2026-04-08 05:00:27 +01:00)

Compare commits — 30 commits, refactor/c… → feat/json-…
| Author | SHA1 | Date |
|---|---|---|
| | 4af6a9abae | |
| | 72ce8768b3 | |
| | 55e892dad3 | |
| | 181116308f | |
| | eaa678910b | |
| | e994caeb02 | |
| | 10840f8495 | |
| | 1fcd3adfc8 | |
| | 3e14b26b00 | |
| | b30bfa6371 | |
| | e7f4a04b36 | |
| | 0687634da3 | |
| | 7e7732243e | |
| | 2f952e402f | |
| | a12febca4a | |
| | cb71c9c3f7 | |
| | 1cd4ce6509 | |
| | 9299c8ab18 | |
| | 24749de269 | |
| | 39098ec3f4 | |
| | fe554f5c94 | |
| | 8a60a041a6 | |
| | 541f19c34a | |
| | 010db03d6e | |
| | 5408acbd8c | |
| | 0de6c85f81 | |
| | 69ec24fa05 | |
| | 539d732b65 | |
| | 843d5fb199 | |
| | fabdfb8cc1 | |
.github/workflows/integrationci.yaml (vendored, 1 change)
```diff
@@ -52,6 +52,7 @@ jobs:
           - ingestionkeys
           - rootuser
           - serviceaccount
+          - querier_json_body
         sqlstore-provider:
           - postgres
           - sqlite
```
```diff
@@ -405,11 +405,11 @@ components:
       type: object
     CloudintegrationtypesAWSCollectionStrategy:
       properties:
-        logs:
+        aws_logs:
           $ref: '#/components/schemas/CloudintegrationtypesAWSLogsStrategy'
-        metrics:
+        aws_metrics:
           $ref: '#/components/schemas/CloudintegrationtypesAWSMetricsStrategy'
-        s3Buckets:
+        s3_buckets:
           additionalProperties:
             items:
               type: string
@@ -449,12 +449,12 @@ components:
       type: object
     CloudintegrationtypesAWSLogsStrategy:
       properties:
-        cloudwatchLogsSubscriptions:
+        cloudwatch_logs_subscriptions:
           items:
             properties:
-              filterPattern:
+              filter_pattern:
                 type: string
-              logGroupNamePrefix:
+              log_group_name_prefix:
                 type: string
             type: object
           nullable: true
@@ -462,7 +462,7 @@ components:
       type: object
     CloudintegrationtypesAWSMetricsStrategy:
       properties:
-        cloudwatchMetricStreamFilters:
+        cloudwatch_metric_stream_filters:
           items:
             properties:
               MetricNames:
@@ -486,7 +486,7 @@ components:
       properties:
         enabled:
           type: boolean
-        s3Buckets:
+        s3_buckets:
           additionalProperties:
             items:
               type: string
```
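The hunks above are a mechanical camelCase → snake_case rename, but they do change the wire format of the strategy objects. A minimal sketch of the resulting JSON shape, using hypothetical Go mirror types (type names and field selection are assumptions derived from the spec above, not code from this PR):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirrors of the renamed schemas; only the json tags matter here.
type AWSLogsStrategy struct {
	CloudwatchLogsSubscriptions []struct {
		FilterPattern      string `json:"filter_pattern,omitempty"`        // was filterPattern
		LogGroupNamePrefix string `json:"log_group_name_prefix,omitempty"` // was logGroupNamePrefix
	} `json:"cloudwatch_logs_subscriptions,omitempty"` // was cloudwatchLogsSubscriptions
}

type AWSCollectionStrategy struct {
	AWSLogs   *AWSLogsStrategy    `json:"aws_logs,omitempty"`   // was logs
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // was s3Buckets
}

func main() {
	s := AWSCollectionStrategy{S3Buckets: map[string][]string{"us-east-1": {"audit-logs"}}}
	out, _ := json.Marshal(s)
	fmt.Println(string(out)) // {"s3_buckets":{"us-east-1":["audit-logs"]}}
}
```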
```diff
@@ -561,26 +561,6 @@ components:
           nullable: true
           type: array
       type: object
-    CloudintegrationtypesCloudIntegrationService:
-      nullable: true
-      properties:
-        cloudIntegrationId:
-          type: string
-        config:
-          $ref: '#/components/schemas/CloudintegrationtypesServiceConfig'
-        createdAt:
-          format: date-time
-          type: string
-        id:
-          type: string
-        type:
-          $ref: '#/components/schemas/CloudintegrationtypesServiceID'
-        updatedAt:
-          format: date-time
-          type: string
-      required:
-        - id
-      type: object
     CloudintegrationtypesCollectedLogAttribute:
       properties:
         name:
```
```diff
@@ -616,16 +596,6 @@ components:
         - aws
       type: object
-    CloudintegrationtypesConnectionArtifactRequest:
-      properties:
-        config:
-          $ref: '#/components/schemas/CloudintegrationtypesConnectionArtifactRequestConfig'
-        credentials:
-          $ref: '#/components/schemas/CloudintegrationtypesSignozCredentials'
-      required:
-        - config
-        - credentials
-      type: object
     CloudintegrationtypesConnectionArtifactRequestConfig:
       properties:
         aws:
           $ref: '#/components/schemas/CloudintegrationtypesAWSConnectionArtifactRequest'
```
```diff
@@ -724,54 +694,11 @@ components:
             type: string
           type: array
         telemetry:
-          $ref: '#/components/schemas/CloudintegrationtypesOldAWSCollectionStrategy'
+          $ref: '#/components/schemas/CloudintegrationtypesAWSCollectionStrategy'
       required:
         - enabled_regions
         - telemetry
       type: object
-    CloudintegrationtypesOldAWSCollectionStrategy:
-      properties:
-        aws_logs:
-          $ref: '#/components/schemas/CloudintegrationtypesOldAWSLogsStrategy'
-        aws_metrics:
-          $ref: '#/components/schemas/CloudintegrationtypesOldAWSMetricsStrategy'
-        provider:
-          type: string
-        s3_buckets:
-          additionalProperties:
-            items:
-              type: string
-            type: array
-          type: object
-      type: object
-    CloudintegrationtypesOldAWSLogsStrategy:
-      properties:
-        cloudwatch_logs_subscriptions:
-          items:
-            properties:
-              filter_pattern:
-                type: string
-              log_group_name_prefix:
-                type: string
-            type: object
-          nullable: true
-          type: array
-      type: object
-    CloudintegrationtypesOldAWSMetricsStrategy:
-      properties:
-        cloudwatch_metric_stream_filters:
-          items:
-            properties:
-              MetricNames:
-                items:
-                  type: string
-                type: array
-              Namespace:
-                type: string
-            type: object
-          nullable: true
-          type: array
-      type: object
     CloudintegrationtypesPostableAgentCheckInRequest:
       properties:
         account_id:
```
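With the Old* schemas gone, the agent-facing integration config points its `telemetry` block at the unified strategy. A shape-only sketch (type names are assumptions; the real schema is CloudintegrationtypesIntegrationConfig above):

```go
package main

// Shape-only sketch of the change above: "telemetry" now carries the unified
// (snake_case) strategy; the CloudintegrationtypesOldAWS* variants are deleted.
type AWSCollectionStrategy struct {
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

type IntegrationConfig struct {
	EnabledRegions []string              `json:"enabled_regions"`
	Telemetry      AWSCollectionStrategy `json:"telemetry"`
}

func main() {} // compiles; illustration only
```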
```diff
@@ -800,8 +727,6 @@ components:
       properties:
         assets:
           $ref: '#/components/schemas/CloudintegrationtypesAssets'
-        cloudIntegrationService:
-          $ref: '#/components/schemas/CloudintegrationtypesCloudIntegrationService'
         dataCollected:
           $ref: '#/components/schemas/CloudintegrationtypesDataCollected'
         icon:
@@ -810,7 +735,9 @@ components:
           type: string
         overview:
           type: string
-        supportedSignals:
+        serviceConfig:
+          $ref: '#/components/schemas/CloudintegrationtypesServiceConfig'
+        supported_signals:
           $ref: '#/components/schemas/CloudintegrationtypesSupportedSignals'
         telemetryCollectionStrategy:
           $ref: '#/components/schemas/CloudintegrationtypesCollectionStrategy'
@@ -822,10 +749,9 @@ components:
         - icon
         - overview
         - assets
-        - supportedSignals
+        - supported_signals
         - dataCollected
         - telemetryCollectionStrategy
-        - cloudIntegrationService
       type: object
     CloudintegrationtypesServiceConfig:
       properties:
```
```diff
@@ -834,22 +760,6 @@ components:
       required:
         - aws
       type: object
-    CloudintegrationtypesServiceID:
-      enum:
-        - alb
-        - api-gateway
-        - dynamodb
-        - ec2
-        - ecs
-        - eks
-        - elasticache
-        - lambda
-        - msk
-        - rds
-        - s3sync
-        - sns
-        - sqs
-      type: string
     CloudintegrationtypesServiceMetadata:
       properties:
         enabled:
```
```diff
@@ -866,22 +776,6 @@ components:
         - icon
         - enabled
       type: object
-    CloudintegrationtypesSignozCredentials:
-      properties:
-        ingestionKey:
-          type: string
-        ingestionUrl:
-          type: string
-        sigNozApiKey:
-          type: string
-        sigNozApiURL:
-          type: string
-      required:
-        - sigNozApiURL
-        - sigNozApiKey
-        - ingestionUrl
-        - ingestionKey
-      type: object
     CloudintegrationtypesSupportedSignals:
       properties:
         logs:
```
```diff
@@ -3500,61 +3394,6 @@ paths:
       summary: Update account
       tags:
         - cloudintegration
-  /api/v1/cloud_integrations/{cloud_provider}/accounts/{id}/services/{service_id}:
-    put:
-      deprecated: false
-      description: This endpoint updates a service for the specified cloud provider
-      operationId: UpdateService
-      parameters:
-        - in: path
-          name: cloud_provider
-          required: true
-          schema:
-            type: string
-        - in: path
-          name: id
-          required: true
-          schema:
-            type: string
-        - in: path
-          name: service_id
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/CloudintegrationtypesUpdatableService'
-      responses:
-        "204":
-          description: No Content
-        "401":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Unauthorized
-        "403":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Forbidden
-        "500":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Internal Server Error
-      security:
-        - api_key:
-            - ADMIN
-        - tokenizer:
-            - ADMIN
-      summary: Update service
-      tags:
-        - cloudintegration
   /api/v1/cloud_integrations/{cloud_provider}/accounts/check_in:
     post:
       deprecated: false
```
```diff
@@ -3612,59 +3451,6 @@ paths:
       summary: Agent check-in
       tags:
         - cloudintegration
-  /api/v1/cloud_integrations/{cloud_provider}/credentials:
-    get:
-      deprecated: false
-      description: This endpoint retrieves the connection credentials required for
-        integration
-      operationId: GetConnectionCredentials
-      parameters:
-        - in: path
-          name: cloud_provider
-          required: true
-          schema:
-            type: string
-      responses:
-        "200":
-          content:
-            application/json:
-              schema:
-                properties:
-                  data:
-                    $ref: '#/components/schemas/CloudintegrationtypesSignozCredentials'
-                  status:
-                    type: string
-                required:
-                  - status
-                  - data
-                type: object
-          description: OK
-        "401":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Unauthorized
-        "403":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Forbidden
-        "500":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Internal Server Error
-      security:
-        - api_key:
-            - ADMIN
-        - tokenizer:
-            - ADMIN
-      summary: Get connection credentials
-      tags:
-        - cloudintegration
   /api/v1/cloud_integrations/{cloud_provider}/services:
     get:
       deprecated: false
```
```diff
@@ -3775,6 +3561,55 @@ paths:
       summary: Get service
       tags:
         - cloudintegration
+    put:
+      deprecated: false
+      description: This endpoint updates a service for the specified cloud provider
+      operationId: UpdateService
+      parameters:
+        - in: path
+          name: cloud_provider
+          required: true
+          schema:
+            type: string
+        - in: path
+          name: service_id
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/CloudintegrationtypesUpdatableService'
+      responses:
+        "204":
+          description: No Content
+        "401":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Unauthorized
+        "403":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Forbidden
+        "500":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Internal Server Error
+      security:
+        - api_key:
+            - ADMIN
+        - tokenizer:
+            - ADMIN
+      summary: Update service
+      tags:
+        - cloudintegration
   /api/v1/complete/google:
     get:
       deprecated: false
```
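Net effect of the two path hunks: UpdateService moves from `/accounts/{id}/services/{service_id}` to `/services/{service_id}` under the same provider prefix. A hedged sketch of a raw HTTP call against the new route (base URL, auth, and body are placeholders, not part of the spec):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical payload; the real schema is CloudintegrationtypesUpdatableService.
	body := strings.NewReader(`{"config":{"aws":{}}}`)

	// New route shape; previously: .../aws/accounts/{id}/services/rds
	req, err := http.NewRequest(http.MethodPut,
		"https://signoz.example.com/api/v1/cloud_integrations/aws/services/rds", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 204 No Content on success
}
```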
```diff
@@ -33,8 +33,6 @@ import type {
   DisconnectAccountPathParameters,
   GetAccount200,
   GetAccountPathParameters,
-  GetConnectionCredentials200,
-  GetConnectionCredentialsPathParameters,
   GetService200,
   GetServicePathParameters,
   ListAccounts200,
```
```diff
@@ -630,103 +628,6 @@ export const useUpdateAccount = <
 
   return useMutation(mutationOptions);
 };
-/**
- * This endpoint updates a service for the specified cloud provider
- * @summary Update service
- */
-export const updateService = (
-  { cloudProvider, id, serviceId }: UpdateServicePathParameters,
-  cloudintegrationtypesUpdatableServiceDTO: BodyType<CloudintegrationtypesUpdatableServiceDTO>,
-) => {
-  return GeneratedAPIInstance<void>({
-    url: `/api/v1/cloud_integrations/${cloudProvider}/accounts/${id}/services/${serviceId}`,
-    method: 'PUT',
-    headers: { 'Content-Type': 'application/json' },
-    data: cloudintegrationtypesUpdatableServiceDTO,
-  });
-};
-
-export const getUpdateServiceMutationOptions = <
-  TError = ErrorType<RenderErrorResponseDTO>,
-  TContext = unknown
->(options?: {
-  mutation?: UseMutationOptions<
-    Awaited<ReturnType<typeof updateService>>,
-    TError,
-    {
-      pathParams: UpdateServicePathParameters;
-      data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
-    },
-    TContext
-  >;
-}): UseMutationOptions<
-  Awaited<ReturnType<typeof updateService>>,
-  TError,
-  {
-    pathParams: UpdateServicePathParameters;
-    data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
-  },
-  TContext
-> => {
-  const mutationKey = ['updateService'];
-  const { mutation: mutationOptions } = options
-    ? options.mutation &&
-      'mutationKey' in options.mutation &&
-      options.mutation.mutationKey
-      ? options
-      : { ...options, mutation: { ...options.mutation, mutationKey } }
-    : { mutation: { mutationKey } };
-
-  const mutationFn: MutationFunction<
-    Awaited<ReturnType<typeof updateService>>,
-    {
-      pathParams: UpdateServicePathParameters;
-      data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
-    }
-  > = (props) => {
-    const { pathParams, data } = props ?? {};
-
-    return updateService(pathParams, data);
-  };
-
-  return { mutationFn, ...mutationOptions };
-};
-
-export type UpdateServiceMutationResult = NonNullable<
-  Awaited<ReturnType<typeof updateService>>
->;
-export type UpdateServiceMutationBody = BodyType<CloudintegrationtypesUpdatableServiceDTO>;
-export type UpdateServiceMutationError = ErrorType<RenderErrorResponseDTO>;
-
-/**
- * @summary Update service
- */
-export const useUpdateService = <
-  TError = ErrorType<RenderErrorResponseDTO>,
-  TContext = unknown
->(options?: {
-  mutation?: UseMutationOptions<
-    Awaited<ReturnType<typeof updateService>>,
-    TError,
-    {
-      pathParams: UpdateServicePathParameters;
-      data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
-    },
-    TContext
-  >;
-}): UseMutationResult<
-  Awaited<ReturnType<typeof updateService>>,
-  TError,
-  {
-    pathParams: UpdateServicePathParameters;
-    data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
-  },
-  TContext
-> => {
-  const mutationOptions = getUpdateServiceMutationOptions(options);
-
-  return useMutation(mutationOptions);
-};
 /**
  * This endpoint is called by the deployed agent to check in
  * @summary Agent check-in
```
```diff
@@ -826,114 +727,6 @@ export const useAgentCheckIn = <
 
   return useMutation(mutationOptions);
 };
-/**
- * This endpoint retrieves the connection credentials required for integration
- * @summary Get connection credentials
- */
-export const getConnectionCredentials = (
-  { cloudProvider }: GetConnectionCredentialsPathParameters,
-  signal?: AbortSignal,
-) => {
-  return GeneratedAPIInstance<GetConnectionCredentials200>({
-    url: `/api/v1/cloud_integrations/${cloudProvider}/credentials`,
-    method: 'GET',
-    signal,
-  });
-};
-
-export const getGetConnectionCredentialsQueryKey = ({
-  cloudProvider,
-}: GetConnectionCredentialsPathParameters) => {
-  return [`/api/v1/cloud_integrations/${cloudProvider}/credentials`] as const;
-};
-
-export const getGetConnectionCredentialsQueryOptions = <
-  TData = Awaited<ReturnType<typeof getConnectionCredentials>>,
-  TError = ErrorType<RenderErrorResponseDTO>
->(
-  { cloudProvider }: GetConnectionCredentialsPathParameters,
-  options?: {
-    query?: UseQueryOptions<
-      Awaited<ReturnType<typeof getConnectionCredentials>>,
-      TError,
-      TData
-    >;
-  },
-) => {
-  const { query: queryOptions } = options ?? {};
-
-  const queryKey =
-    queryOptions?.queryKey ??
-    getGetConnectionCredentialsQueryKey({ cloudProvider });
-
-  const queryFn: QueryFunction<
-    Awaited<ReturnType<typeof getConnectionCredentials>>
-  > = ({ signal }) => getConnectionCredentials({ cloudProvider }, signal);
-
-  return {
-    queryKey,
-    queryFn,
-    enabled: !!cloudProvider,
-    ...queryOptions,
-  } as UseQueryOptions<
-    Awaited<ReturnType<typeof getConnectionCredentials>>,
-    TError,
-    TData
-  > & { queryKey: QueryKey };
-};
-
-export type GetConnectionCredentialsQueryResult = NonNullable<
-  Awaited<ReturnType<typeof getConnectionCredentials>>
->;
-export type GetConnectionCredentialsQueryError = ErrorType<RenderErrorResponseDTO>;
-
-/**
- * @summary Get connection credentials
- */
-
-export function useGetConnectionCredentials<
-  TData = Awaited<ReturnType<typeof getConnectionCredentials>>,
-  TError = ErrorType<RenderErrorResponseDTO>
->(
-  { cloudProvider }: GetConnectionCredentialsPathParameters,
-  options?: {
-    query?: UseQueryOptions<
-      Awaited<ReturnType<typeof getConnectionCredentials>>,
-      TError,
-      TData
-    >;
-  },
-): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
-  const queryOptions = getGetConnectionCredentialsQueryOptions(
-    { cloudProvider },
-    options,
-  );
-
-  const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
-    queryKey: QueryKey;
-  };
-
-  query.queryKey = queryOptions.queryKey;
-
-  return query;
-}
-
-/**
- * @summary Get connection credentials
- */
-export const invalidateGetConnectionCredentials = async (
-  queryClient: QueryClient,
-  { cloudProvider }: GetConnectionCredentialsPathParameters,
-  options?: InvalidateOptions,
-): Promise<QueryClient> => {
-  await queryClient.invalidateQueries(
-    { queryKey: getGetConnectionCredentialsQueryKey({ cloudProvider }) },
-    options,
-  );
-
-  return queryClient;
-};
 /**
  * This endpoint lists the services metadata for the specified cloud provider
  * @summary List services metadata
```
```diff
@@ -1148,3 +941,101 @@ export const invalidateGetService = async (
 
   return queryClient;
 };
+/**
+ * This endpoint updates a service for the specified cloud provider
+ * @summary Update service
+ */
+export const updateService = (
+  { cloudProvider, serviceId }: UpdateServicePathParameters,
+  cloudintegrationtypesUpdatableServiceDTO: BodyType<CloudintegrationtypesUpdatableServiceDTO>,
+) => {
+  return GeneratedAPIInstance<void>({
+    url: `/api/v1/cloud_integrations/${cloudProvider}/services/${serviceId}`,
+    method: 'PUT',
+    headers: { 'Content-Type': 'application/json' },
+    data: cloudintegrationtypesUpdatableServiceDTO,
+  });
+};
+
+export const getUpdateServiceMutationOptions = <
+  TError = ErrorType<RenderErrorResponseDTO>,
+  TContext = unknown
+>(options?: {
+  mutation?: UseMutationOptions<
+    Awaited<ReturnType<typeof updateService>>,
+    TError,
+    {
+      pathParams: UpdateServicePathParameters;
+      data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
+    },
+    TContext
+  >;
+}): UseMutationOptions<
+  Awaited<ReturnType<typeof updateService>>,
+  TError,
+  {
+    pathParams: UpdateServicePathParameters;
+    data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
+  },
+  TContext
+> => {
+  const mutationKey = ['updateService'];
+  const { mutation: mutationOptions } = options
+    ? options.mutation &&
+      'mutationKey' in options.mutation &&
+      options.mutation.mutationKey
+      ? options
+      : { ...options, mutation: { ...options.mutation, mutationKey } }
+    : { mutation: { mutationKey } };
+
+  const mutationFn: MutationFunction<
+    Awaited<ReturnType<typeof updateService>>,
+    {
+      pathParams: UpdateServicePathParameters;
+      data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
+    }
+  > = (props) => {
+    const { pathParams, data } = props ?? {};
+
+    return updateService(pathParams, data);
+  };
+
+  return { mutationFn, ...mutationOptions };
+};
+
+export type UpdateServiceMutationResult = NonNullable<
+  Awaited<ReturnType<typeof updateService>>
+>;
+export type UpdateServiceMutationBody = BodyType<CloudintegrationtypesUpdatableServiceDTO>;
+export type UpdateServiceMutationError = ErrorType<RenderErrorResponseDTO>;
+
+/**
+ * @summary Update service
+ */
+export const useUpdateService = <
+  TError = ErrorType<RenderErrorResponseDTO>,
+  TContext = unknown
+>(options?: {
+  mutation?: UseMutationOptions<
+    Awaited<ReturnType<typeof updateService>>,
+    TError,
+    {
+      pathParams: UpdateServicePathParameters;
+      data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
+    },
+    TContext
+  >;
+}): UseMutationResult<
+  Awaited<ReturnType<typeof updateService>>,
+  TError,
+  {
+    pathParams: UpdateServicePathParameters;
+    data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
+  },
+  TContext
+> => {
+  const mutationOptions = getUpdateServiceMutationOptions(options);
+
+  return useMutation(mutationOptions);
+};
```
```diff
@@ -517,12 +517,12 @@ export type CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets = {
 };
 
 export interface CloudintegrationtypesAWSCollectionStrategyDTO {
-  logs?: CloudintegrationtypesAWSLogsStrategyDTO;
-  metrics?: CloudintegrationtypesAWSMetricsStrategyDTO;
+  aws_logs?: CloudintegrationtypesAWSLogsStrategyDTO;
+  aws_metrics?: CloudintegrationtypesAWSMetricsStrategyDTO;
   /**
    * @type object
    */
-  s3Buckets?: CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets;
+  s3_buckets?: CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets;
 }
 
 export interface CloudintegrationtypesAWSConnectionArtifactDTO {
@@ -555,11 +555,11 @@ export type CloudintegrationtypesAWSLogsStrategyDTOCloudwatchLogsSubscriptionsIt
   /**
    * @type string
    */
-  filterPattern?: string;
+  filter_pattern?: string;
   /**
    * @type string
    */
-  logGroupNamePrefix?: string;
+  log_group_name_prefix?: string;
 };
 
 export interface CloudintegrationtypesAWSLogsStrategyDTO {
@@ -567,7 +567,7 @@ export interface CloudintegrationtypesAWSLogsStrategyDTO {
    * @type array
    * @nullable true
    */
-  cloudwatchLogsSubscriptions?:
+  cloudwatch_logs_subscriptions?:
     | CloudintegrationtypesAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem[]
     | null;
 }
@@ -588,7 +588,7 @@ export interface CloudintegrationtypesAWSMetricsStrategyDTO {
    * @type array
    * @nullable true
    */
-  cloudwatchMetricStreamFilters?:
+  cloudwatch_metric_stream_filters?:
     | CloudintegrationtypesAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem[]
     | null;
 }
@@ -610,7 +610,7 @@ export interface CloudintegrationtypesAWSServiceLogsConfigDTO {
   /**
    * @type object
    */
-  s3Buckets?: CloudintegrationtypesAWSServiceLogsConfigDTOS3Buckets;
+  s3_buckets?: CloudintegrationtypesAWSServiceLogsConfigDTOS3Buckets;
 }
 
 export interface CloudintegrationtypesAWSServiceMetricsConfigDTO {
```
```diff
@@ -693,32 +693,6 @@ export interface CloudintegrationtypesAssetsDTO {
   dashboards?: CloudintegrationtypesDashboardDTO[] | null;
 }
 
-/**
- * @nullable
- */
-export type CloudintegrationtypesCloudIntegrationServiceDTO = {
-  /**
-   * @type string
-   */
-  cloudIntegrationId?: string;
-  config?: CloudintegrationtypesServiceConfigDTO;
-  /**
-   * @type string
-   * @format date-time
-   */
-  createdAt?: Date;
-  /**
-   * @type string
-   */
-  id: string;
-  type?: CloudintegrationtypesServiceIDDTO;
-  /**
-   * @type string
-   * @format date-time
-   */
-  updatedAt?: Date;
-} | null;
-
 export interface CloudintegrationtypesCollectedLogAttributeDTO {
   /**
    * @type string
@@ -762,11 +736,6 @@ export interface CloudintegrationtypesConnectionArtifactDTO {
 }
 
-export interface CloudintegrationtypesConnectionArtifactRequestDTO {
-  config: CloudintegrationtypesConnectionArtifactRequestConfigDTO;
-  credentials: CloudintegrationtypesSignozCredentialsDTO;
-}
-
 export interface CloudintegrationtypesConnectionArtifactRequestConfigDTO {
   aws: CloudintegrationtypesAWSConnectionArtifactRequestDTO;
 }
```
```diff
@@ -862,68 +831,9 @@ export type CloudintegrationtypesIntegrationConfigDTO = {
   * @type array
   */
  enabled_regions: string[];
-  telemetry: CloudintegrationtypesOldAWSCollectionStrategyDTO;
+  telemetry: CloudintegrationtypesAWSCollectionStrategyDTO;
 } | null;
 
-export type CloudintegrationtypesOldAWSCollectionStrategyDTOS3Buckets = {
-  [key: string]: string[];
-};
-
-export interface CloudintegrationtypesOldAWSCollectionStrategyDTO {
-  aws_logs?: CloudintegrationtypesOldAWSLogsStrategyDTO;
-  aws_metrics?: CloudintegrationtypesOldAWSMetricsStrategyDTO;
-  /**
-   * @type string
-   */
-  provider?: string;
-  /**
-   * @type object
-   */
-  s3_buckets?: CloudintegrationtypesOldAWSCollectionStrategyDTOS3Buckets;
-}
-
-export type CloudintegrationtypesOldAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem = {
-  /**
-   * @type string
-   */
-  filter_pattern?: string;
-  /**
-   * @type string
-   */
-  log_group_name_prefix?: string;
-};
-
-export interface CloudintegrationtypesOldAWSLogsStrategyDTO {
-  /**
-   * @type array
-   * @nullable true
-   */
-  cloudwatch_logs_subscriptions?:
-    | CloudintegrationtypesOldAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem[]
-    | null;
-}
-
-export type CloudintegrationtypesOldAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem = {
-  /**
-   * @type array
-   */
-  MetricNames?: string[];
-  /**
-   * @type string
-   */
-  Namespace?: string;
-};
-
-export interface CloudintegrationtypesOldAWSMetricsStrategyDTO {
-  /**
-   * @type array
-   * @nullable true
-   */
-  cloudwatch_metric_stream_filters?:
-    | CloudintegrationtypesOldAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem[]
-    | null;
-}
-
 /**
  * @nullable
  */
```
```diff
@@ -961,7 +871,6 @@ export interface CloudintegrationtypesProviderIntegrationConfigDTO {
 
 export interface CloudintegrationtypesServiceDTO {
   assets: CloudintegrationtypesAssetsDTO;
-  cloudIntegrationService: CloudintegrationtypesCloudIntegrationServiceDTO;
   dataCollected: CloudintegrationtypesDataCollectedDTO;
   /**
    * @type string
@@ -975,7 +884,8 @@ export interface CloudintegrationtypesServiceDTO {
    * @type string
    */
   overview: string;
-  supportedSignals: CloudintegrationtypesSupportedSignalsDTO;
+  serviceConfig?: CloudintegrationtypesServiceConfigDTO;
+  supported_signals: CloudintegrationtypesSupportedSignalsDTO;
   telemetryCollectionStrategy: CloudintegrationtypesCollectionStrategyDTO;
   /**
    * @type string
@@ -987,21 +897,6 @@ export interface CloudintegrationtypesServiceConfigDTO {
   aws: CloudintegrationtypesAWSServiceConfigDTO;
 }
 
-export enum CloudintegrationtypesServiceIDDTO {
-  alb = 'alb',
-  'api-gateway' = 'api-gateway',
-  dynamodb = 'dynamodb',
-  ec2 = 'ec2',
-  ecs = 'ecs',
-  eks = 'eks',
-  elasticache = 'elasticache',
-  lambda = 'lambda',
-  msk = 'msk',
-  rds = 'rds',
-  s3sync = 's3sync',
-  sns = 'sns',
-  sqs = 'sqs',
-}
 export interface CloudintegrationtypesServiceMetadataDTO {
   /**
    * @type boolean
@@ -1021,25 +916,6 @@ export interface CloudintegrationtypesServiceMetadataDTO {
   title: string;
 }
 
-export interface CloudintegrationtypesSignozCredentialsDTO {
-  /**
-   * @type string
-   */
-  ingestionKey: string;
-  /**
-   * @type string
-   */
-  ingestionUrl: string;
-  /**
-   * @type string
-   */
-  sigNozApiKey: string;
-  /**
-   * @type string
-   */
-  sigNozApiURL: string;
-}
-
 export interface CloudintegrationtypesSupportedSignalsDTO {
   /**
    * @type boolean
```
```diff
@@ -3623,11 +3499,6 @@ export type UpdateAccountPathParameters = {
   cloudProvider: string;
   id: string;
 };
-export type UpdateServicePathParameters = {
-  cloudProvider: string;
-  id: string;
-  serviceId: string;
-};
 export type AgentCheckInPathParameters = {
   cloudProvider: string;
 };
@@ -3639,17 +3510,6 @@ export type AgentCheckIn200 = {
   status: string;
 };
 
-export type GetConnectionCredentialsPathParameters = {
-  cloudProvider: string;
-};
-export type GetConnectionCredentials200 = {
-  data: CloudintegrationtypesSignozCredentialsDTO;
-  /**
-   * @type string
-   */
-  status: string;
-};
-
 export type ListServicesMetadataPathParameters = {
   cloudProvider: string;
 };
@@ -3673,6 +3533,10 @@ export type GetService200 = {
   status: string;
 };
 
+export type UpdateServicePathParameters = {
+  cloudProvider: string;
+  serviceId: string;
+};
 export type CreateSessionByGoogleCallback303 = {
   data: AuthtypesGettableTokenDTO;
   /**
```
```diff
@@ -10,26 +10,6 @@ import (
 )
 
 func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
-	if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/credentials", handler.New(
-		provider.authZ.AdminAccess(provider.cloudIntegrationHandler.GetConnectionCredentials),
-		handler.OpenAPIDef{
-			ID:                  "GetConnectionCredentials",
-			Tags:                []string{"cloudintegration"},
-			Summary:             "Get connection credentials",
-			Description:         "This endpoint retrieves the connection credentials required for integration",
-			Request:             nil,
-			RequestContentType:  "application/json",
-			Response:            new(citypes.SignozCredentials),
-			ResponseContentType: "application/json",
-			SuccessStatusCode:   http.StatusOK,
-			ErrorStatusCodes:    []int{},
-			Deprecated:          false,
-			SecuritySchemes:     newSecuritySchemes(types.RoleAdmin),
-		},
-	)).Methods(http.MethodGet).GetError(); err != nil {
-		return err
-	}
-
 	if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts", handler.New(
 		provider.authZ.AdminAccess(provider.cloudIntegrationHandler.CreateAccount),
 		handler.OpenAPIDef{
@@ -79,7 +59,7 @@ func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
 			Description:         "This endpoint gets an account for the specified cloud provider",
 			Request:             nil,
 			RequestContentType:  "",
-			Response:            new(citypes.Account),
+			Response:            new(citypes.GettableAccount),
 			ResponseContentType: "application/json",
 			SuccessStatusCode:   http.StatusOK,
 			ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusNotFound},
@@ -159,7 +139,7 @@ func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
 			Description:         "This endpoint gets a service for the specified cloud provider",
 			Request:             nil,
 			RequestContentType:  "",
-			Response:            new(citypes.Service),
+			Response:            new(citypes.GettableService),
 			ResponseContentType: "application/json",
 			SuccessStatusCode:   http.StatusOK,
 			ErrorStatusCodes:    []int{},
@@ -170,7 +150,7 @@ func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
 		return err
 	}
 
-	if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts/{id}/services/{service_id}", handler.New(
+	if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/services/{service_id}", handler.New(
 		provider.authZ.AdminAccess(provider.cloudIntegrationHandler.UpdateService),
 		handler.OpenAPIDef{
 			ID: "UpdateService",
```
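For reference, a minimal gorilla/mux sketch of how the relocated route resolves its path variables: only cloud_provider and service_id remain once the account {id} segment is dropped. The handler body here is illustrative, not the real one:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	router.Handle("/api/v1/cloud_integrations/{cloud_provider}/services/{service_id}",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			vars := mux.Vars(r) // no "id" variable anymore
			fmt.Fprintf(w, "provider=%s service=%s", vars["cloud_provider"], vars["service_id"])
		})).Methods(http.MethodPut)
	_ = http.ListenAndServe(":8080", router)
}
```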
```diff
@@ -10,21 +10,19 @@ import (
 )
 
 type Module interface {
-	GetConnectionCredentials(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType) (*citypes.SignozCredentials, error)
-
 	CreateAccount(ctx context.Context, account *citypes.Account) error
 
 	// GetAccount returns cloud integration account
-	GetAccount(ctx context.Context, orgID, accountID valuer.UUID, provider citypes.CloudProviderType) (*citypes.Account, error)
+	GetAccount(ctx context.Context, orgID, accountID valuer.UUID) (*citypes.Account, error)
 
 	// ListAccounts lists accounts where agent is connected
-	ListAccounts(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType) ([]*citypes.Account, error)
+	ListAccounts(ctx context.Context, orgID valuer.UUID) ([]*citypes.Account, error)
 
 	// UpdateAccount updates the cloud integration account for a specific organization.
 	UpdateAccount(ctx context.Context, account *citypes.Account) error
 
 	// DisconnectAccount soft deletes/removes a cloud integration account.
-	DisconnectAccount(ctx context.Context, orgID, accountID valuer.UUID, provider citypes.CloudProviderType) error
+	DisconnectAccount(ctx context.Context, orgID, accountID valuer.UUID) error
 
 	// GetConnectionArtifact returns cloud provider specific connection information,
 	// client side handles how this information is shown
@@ -32,20 +30,17 @@ type Module interface {
 
 	// ListServicesMetadata returns the list of services metadata for a cloud provider attached with the integrationID.
 	// This just returns a summary of the service and not the whole service definition
-	ListServicesMetadata(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType, integrationID *valuer.UUID) ([]*citypes.ServiceMetadata, error)
+	ListServicesMetadata(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID) ([]*citypes.ServiceMetadata, error)
 
 	// GetService returns service definition details for a serviceID. This returns config and
 	// other details required to show in service details page on web client.
-	GetService(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID, serviceID citypes.ServiceID, provider citypes.CloudProviderType) (*citypes.Service, error)
-
-	// CreateService creates a new service for a cloud integration account.
-	CreateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService, provider citypes.CloudProviderType) error
+	GetService(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID, serviceID string) (*citypes.Service, error)
 
 	// UpdateService updates cloud integration service
-	UpdateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService, provider citypes.CloudProviderType) error
+	UpdateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService) error
 
 	// AgentCheckIn is called by agent to heartbeat and get latest config in response.
-	AgentCheckIn(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType, req *citypes.AgentCheckInRequest) (*citypes.AgentCheckInResponse, error)
+	AgentCheckIn(ctx context.Context, orgID valuer.UUID, req *citypes.AgentCheckInRequest) (*citypes.AgentCheckInResponse, error)
 
 	// GetDashboardByID returns dashboard JSON for a given dashboard id.
 	// this only returns the dashboard when the service (embedded in dashboard id) is enabled
@@ -57,22 +52,7 @@ type Module interface {
 	ListDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
 }
 
-type CloudProviderModule interface {
-	GetConnectionArtifact(ctx context.Context, account *citypes.Account, req *citypes.ConnectionArtifactRequest) (*citypes.ConnectionArtifact, error)
-
-	// ListServiceDefinitions returns all service definitions for this cloud provider.
-	ListServiceDefinitions(ctx context.Context) ([]*citypes.ServiceDefinition, error)
-
-	// GetServiceDefinition returns the service definition for the given service ID.
-	GetServiceDefinition(ctx context.Context, serviceID citypes.ServiceID) (*citypes.ServiceDefinition, error)
-
-	// BuildIntegrationConfig compiles the provider-specific integration config from the account
-	// and list of configured services. This is the config returned to the agent on check-in.
-	BuildIntegrationConfig(ctx context.Context, account *citypes.Account, services []*citypes.StorableCloudIntegrationService) (*citypes.ProviderIntegrationConfig, error)
-}
-
 type Handler interface {
-	GetConnectionCredentials(http.ResponseWriter, *http.Request)
 	CreateAccount(http.ResponseWriter, *http.Request)
 	ListAccounts(http.ResponseWriter, *http.Request)
 	GetAccount(http.ResponseWriter, *http.Request)
```
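Call sites of the narrowed Module interface drop the provider argument; lookups key on the org and account/integration IDs alone. A compile-checkable sketch with stand-in types (the real valuer.UUID and citypes imports are elided):

```go
package main

import "context"

// Stand-ins for valuer.UUID and the trimmed Module interface above.
type UUID string

type Module interface {
	DisconnectAccount(ctx context.Context, orgID, accountID UUID) error
}

// Before this change the call site also passed a provider, e.g.:
//   m.DisconnectAccount(ctx, orgID, accountID, citypes.CloudProviderType("aws"))
// The provider is presumably resolved from the stored account itself now.
func disconnect(ctx context.Context, m Module, orgID, accountID UUID) error {
	return m.DisconnectAccount(ctx, orgID, accountID)
}

func main() {}
```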
```diff
@@ -12,10 +12,6 @@ func NewHandler() cloudintegration.Handler {
 	return &handler{}
 }
 
-func (handler *handler) GetConnectionCredentials(http.ResponseWriter, *http.Request) {
-	panic("unimplemented")
-}
-
 func (handler *handler) CreateAccount(writer http.ResponseWriter, request *http.Request) {
 	// TODO implement me
 	panic("implement me")
```
```diff
@@ -34,25 +34,6 @@ func (store *store) GetAccountByID(ctx context.Context, orgID, id valuer.UUID, p
 	return account, nil
 }
 
-func (store *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
-	account := new(cloudintegrationtypes.StorableCloudIntegration)
-	err := store.
-		store.
-		BunDBCtx(ctx).
-		NewSelect().
-		Model(account).
-		Where("org_id = ?", orgID).
-		Where("provider = ?", provider).
-		Where("account_id = ?", providerAccountID).
-		Where("last_agent_report IS NOT NULL").
-		Where("removed_at IS NULL").
-		Scan(ctx)
-	if err != nil {
-		return nil, store.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
-	}
-	return account, nil
-}
-
 func (store *store) ListConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) ([]*cloudintegrationtypes.StorableCloudIntegration, error) {
 	var accounts []*cloudintegrationtypes.StorableCloudIntegration
 	err := store.
@@ -115,6 +96,25 @@ func (store *store) RemoveAccount(ctx context.Context, orgID, id valuer.UUID, pr
 	return err
 }
 
+func (store *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
+	account := new(cloudintegrationtypes.StorableCloudIntegration)
+	err := store.
+		store.
+		BunDBCtx(ctx).
+		NewSelect().
+		Model(account).
+		Where("org_id = ?", orgID).
+		Where("provider = ?", provider).
+		Where("account_id = ?", providerAccountID).
+		Where("last_agent_report IS NOT NULL").
+		Where("removed_at IS NULL").
+		Scan(ctx)
+	if err != nil {
+		return nil, store.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
+	}
+	return account, nil
+}
+
 func (store *store) GetServiceByServiceID(ctx context.Context, cloudIntegrationID valuer.UUID, serviceID cloudintegrationtypes.ServiceID) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
 	service := new(cloudintegrationtypes.StorableCloudIntegrationService)
 	err := store.
@@ -172,9 +172,3 @@ func (store *store) UpdateService(ctx context.Context, service *cloudintegration
 		Exec(ctx)
 	return err
 }
-
-func (store *store) RunInTx(ctx context.Context, cb func(ctx context.Context) error) error {
-	return store.store.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
-		return cb(ctx)
-	})
-}
```
```diff
@@ -211,6 +211,8 @@ func DataTypeCollisionHandledFieldName(key *telemetrytypes.TelemetryFieldKey, va
 	case float64:
 		// try to convert the string value to a number
 		tblFieldName = castFloat(tblFieldName)
+	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+		tblFieldName = castFloat(tblFieldName)
 	case []any:
 		if allFloats(v) {
 			tblFieldName = castFloat(tblFieldName)
@@ -277,6 +279,18 @@ func DataTypeCollisionHandledFieldName(key *telemetrytypes.TelemetryFieldKey, va
 				tblFieldName, value = castString(tblFieldName), toStrings(v)
 			}
 		}
+	case telemetrytypes.FieldDataTypeArrayDynamic:
+		switch v := value.(type) {
+		case string:
+			tblFieldName = castString(tblFieldName)
+		case float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
+			tblFieldName = accurateCastFloat(tblFieldName)
+		case bool:
+			tblFieldName = castBool(tblFieldName)
+		case []any:
+			// dynamic array elements will be default casted to string
+			tblFieldName, value = castString(tblFieldName), toStrings(v)
+		}
 	}
 	return tblFieldName, value
 }
@@ -284,6 +298,10 @@ func DataTypeCollisionHandledFieldName(key *telemetrytypes.TelemetryFieldKey, va
 func castFloat(col string) string     { return fmt.Sprintf("toFloat64OrNull(%s)", col) }
 func castFloatHack(col string) string { return fmt.Sprintf("toFloat64(%s)", col) }
 func castString(col string) string    { return fmt.Sprintf("toString(%s)", col) }
+func castBool(col string) string      { return fmt.Sprintf("accurateCastOrNull(%s, 'Bool')", col) }
+func accurateCastFloat(col string) string {
+	return fmt.Sprintf("accurateCastOrNull(%s, 'Float64')", col)
+}
 
 func allFloats(in []any) bool {
 	for _, x := range in {
```
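The two new helpers differ from castFloat in ClickHouse semantics: toFloat64OrNull parses a value as Float64 and yields NULL on failure, while accurateCastOrNull(x, 'Float64') also yields NULL when the conversion would be lossy. A standalone sketch of the SQL fragments these helpers emit (the column expression is hypothetical):

```go
package main

import "fmt"

// Copies of the helpers added in the hunk above.
func castBool(col string) string { return fmt.Sprintf("accurateCastOrNull(%s, 'Bool')", col) }
func accurateCastFloat(col string) string {
	return fmt.Sprintf("accurateCastOrNull(%s, 'Float64')", col)
}

func main() {
	col := "dynamicElement(body_v2.score, 'Dynamic')" // hypothetical column expression
	fmt.Println(accurateCastFloat(col))
	// accurateCastOrNull(dynamicElement(body_v2.score, 'Dynamic'), 'Float64')
	fmt.Println(castBool(col))
	// accurateCastOrNull(dynamicElement(body_v2.score, 'Dynamic'), 'Bool')
}
```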
```diff
@@ -3,6 +3,7 @@ package telemetrylogs
 import (
 	"fmt"
 	"slices"
+	"strings"
 
 	schemamigrator "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
 	"github.com/SigNoz/signoz/pkg/errors"
```
```diff
@@ -33,182 +34,34 @@ func NewJSONConditionBuilder(key *telemetrytypes.TelemetryFieldKey, valueType te
 
 // BuildCondition builds the full WHERE condition for body_v2 JSON paths.
 func (c *jsonConditionBuilder) buildJSONCondition(operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
-	conditions := []string{}
-	for _, node := range c.key.JSONPlan {
-		condition, err := c.emitPlannedCondition(node, operator, value, sb)
-		if err != nil {
-			return "", err
-		}
-		conditions = append(conditions, condition)
-	}
-	baseCond := sb.Or(conditions...)
+	baseCond, err := c.emitPlannedCondition(operator, value, sb)
+	if err != nil {
+		return "", err
+	}
 
 	// path index
 	if operator.AddDefaultExistsFilter() {
 		pathIndex := fmt.Sprintf(`has(%s, '%s')`, schemamigrator.JSONPathsIndexExpr(LogsV2BodyV2Column), c.key.ArrayParentPaths()[0])
 		return sb.And(baseCond, pathIndex), nil
 	}
 
 	return baseCond, nil
 }
 
 // emitPlannedCondition handles paths with array traversal.
-func (c *jsonConditionBuilder) emitPlannedCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
-	// Build traversal + terminal recursively per-hop
-	compiled, err := c.recurseArrayHops(node, operator, value, sb)
-	if err != nil {
-		return "", err
-	}
-	return compiled, nil
-}
-
-// buildTerminalCondition creates the innermost condition.
-func (c *jsonConditionBuilder) buildTerminalCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
-	if node.TerminalConfig.ElemType.IsArray {
-		conditions := []string{}
-		// if the value type is not an array
-		// TODO(piyush): Confirm the Query built for Array case and add testcases for it later
-		if !c.valueType.IsArray {
-			// if the operator is a String search operator, then we need to build one more String comparison condition along with the strict match condition
-			if operator.IsStringSearchOperator() {
-				formattedValue := querybuilder.FormatValueForContains(value)
-				arrayCond, err := c.buildArrayMembershipCondition(node, operator, formattedValue, sb)
-				if err != nil {
-					return "", err
-				}
-				conditions = append(conditions, arrayCond)
-			}
-
-			// switch operator for array membership checks
-			switch operator {
-			case qbtypes.FilterOperatorContains:
-				operator = qbtypes.FilterOperatorEqual
-			case qbtypes.FilterOperatorNotContains:
-				operator = qbtypes.FilterOperatorNotEqual
-			}
-		}
-
-		arrayCond, err := c.buildArrayMembershipCondition(node, operator, value, sb)
-		if err != nil {
-			return "", err
-		}
-		conditions = append(conditions, arrayCond)
-		// or the conditions together
-		return sb.Or(conditions...), nil
-	}
-
-	return c.buildPrimitiveTerminalCondition(node, operator, value, sb)
-}
-
-// buildPrimitiveTerminalCondition builds the condition if the terminal node is a primitive type;
-// it handles data type collisions and utilizes indexes for the condition if available.
-func (c *jsonConditionBuilder) buildPrimitiveTerminalCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
-	fieldPath := node.FieldPath()
-	conditions := []string{}
-	var formattedValue = value
-	if operator.IsStringSearchOperator() {
-		formattedValue = querybuilder.FormatValueForContains(value)
-	}
-
-	elemType := node.TerminalConfig.ElemType
-	fieldExpr := fmt.Sprintf("dynamicElement(%s, '%s')", fieldPath, elemType.StringValue())
-	fieldExpr, formattedValue = querybuilder.DataTypeCollisionHandledFieldName(node.TerminalConfig.Key, formattedValue, fieldExpr, operator)
-
-	// utilize indexes for the condition if available
-	indexed := slices.ContainsFunc(node.TerminalConfig.Key.Indexes, func(index telemetrytypes.JSONDataTypeIndex) bool {
-		return index.Type == elemType && index.ColumnExpression == fieldPath
-	})
-	if elemType.IndexSupported && indexed {
-		indexedExpr := assumeNotNull(fieldPath, elemType)
-		emptyValue := func() any {
-			switch elemType {
-			case telemetrytypes.String:
-				return ""
-			case telemetrytypes.Int64, telemetrytypes.Float64, telemetrytypes.Bool:
-				return 0
-			default:
-				return nil
-			}
-		}()
-
-		// switch the operator and value for exists and not exists
-		switch operator {
-		case qbtypes.FilterOperatorExists:
-			operator = qbtypes.FilterOperatorNotEqual
-			value = emptyValue
-		case qbtypes.FilterOperatorNotExists:
-			operator = qbtypes.FilterOperatorEqual
-			value = emptyValue
-		default:
-			// do nothing
-		}
-
-		indexedExpr, indexedComparisonValue := querybuilder.DataTypeCollisionHandledFieldName(node.TerminalConfig.Key, formattedValue, indexedExpr, operator)
-		cond, err := c.applyOperator(sb, indexedExpr, operator, indexedComparisonValue)
-		if err != nil {
-			return "", err
-		}
-
-		// if qb has a definitive value, we can skip adding a condition to
-		// check the existence of the path in the json column
-		if value != emptyValue {
-			return cond, nil
-		}
-
-		conditions = append(conditions, cond)
-		// Switch operator to EXISTS since indexed paths are assumeNotNull; indexes will always have a default value,
-		// so we flip the operator to EXISTS and filter the rows that actually have the value
-		operator = qbtypes.FilterOperatorExists
-	}
-
-	cond, err := c.applyOperator(sb, fieldExpr, operator, formattedValue)
-	if err != nil {
-		return "", err
-	}
-	conditions = append(conditions, cond)
-	if len(conditions) > 1 {
-		return sb.And(conditions...), nil
-	}
-	return conditions[0], nil
-}
-
-// buildArrayMembershipCondition handles array membership checks.
-func (c *jsonConditionBuilder) buildArrayMembershipCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
-	arrayPath := node.FieldPath()
-	localKeyCopy := *node.TerminalConfig.Key
-	// create typed array out of a dynamic array
-	filteredDynamicExpr := func() string {
-		// Change the field data type from []dynamic to the value type:
-		// since we've filtered the value type out of the dynamic array, we need to change the field data type corresponding to the value type
-		localKeyCopy.FieldDataType = telemetrytypes.MappingJSONDataTypeToFieldDataType[telemetrytypes.ScalerTypeToArrayType[c.valueType]]
-
-		baseArrayDynamicExpr := fmt.Sprintf("dynamicElement(%s, 'Array(Dynamic)')", arrayPath)
-		return fmt.Sprintf("arrayMap(x->dynamicElement(x, '%s'), arrayFilter(x->(dynamicType(x) = '%s'), %s))",
-			c.valueType.StringValue(),
-			c.valueType.StringValue(),
-			baseArrayDynamicExpr)
-	}
-	typedArrayExpr := func() string {
-		return fmt.Sprintf("dynamicElement(%s, '%s')", arrayPath, node.TerminalConfig.ElemType.StringValue())
-	}
-
-	var arrayExpr string
-	if node.TerminalConfig.ElemType == telemetrytypes.ArrayDynamic {
-		arrayExpr = filteredDynamicExpr()
-	} else {
-		arrayExpr = typedArrayExpr()
-	}
-
-	key := "x"
-	fieldExpr, value := querybuilder.DataTypeCollisionHandledFieldName(&localKeyCopy, value, key, operator)
-	op, err := c.applyOperator(sb, fieldExpr, operator, value)
-	if err != nil {
-		return "", err
-	}
-
-	return fmt.Sprintf("arrayExists(%s -> %s, %s)", key, op, arrayExpr), nil
-}
+func (c *jsonConditionBuilder) emitPlannedCondition(operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
+	conditions := []string{}
+	for _, node := range c.key.JSONPlan {
+		condition, err := c.recurseArrayHops(node, operator, value, sb)
+		if err != nil {
+			return "", err
+		}
+		conditions = append(conditions, condition)
+	}
+	return sb.Or(conditions...), nil
+}
 
 // recurseArrayHops recursively builds array traversal conditions.
 func (c *jsonConditionBuilder) recurseArrayHops(current *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
 	if current == nil {
 		return "", errors.NewInternalf(CodeArrayNavigationFailed, "navigation failed, current node is nil")
@@ -222,6 +75,33 @@ func (c *jsonConditionBuilder) recurseArrayHops(current *telemetrytypes.JSONAcce
 		return terminalCond, nil
 	}
 
+	// apply NOT at the top-level arrayExists so that if any subsequent arrayExists fails, we count it as true (matching log)
+	yes, operator := applyNotCondition(operator)
+	condition, err := c.buildAccessNodeBranches(current, operator, value, sb)
+	if err != nil {
+		return "", err
+	}
+
+	if yes {
+		return sb.Not(condition), nil
+	}
+
+	return condition, nil
+}
+
+func applyNotCondition(operator qbtypes.FilterOperator) (bool, qbtypes.FilterOperator) {
+	if operator.IsNegativeOperator() {
+		return true, operator.Inverse()
+	}
+	return false, operator
+}
+
+// buildAccessNodeBranches builds conditions for each branch of the access node.
+func (c *jsonConditionBuilder) buildAccessNodeBranches(current *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
+	if current == nil {
+		return "", errors.NewInternalf(CodeArrayNavigationFailed, "navigation failed, current node is nil")
+	}
+
 	currAlias := current.Alias()
 	fieldPath := current.FieldPath()
 	// Determine availability of Array(JSON) and Array(Dynamic) at this hop
```
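The point of applyNotCondition: a negative operator is evaluated in its positive form per array element, and a single NOT is hoisted to the outermost arrayExists, so a log whose arrays contain no matching element still counts as a match for the negated filter. A toy sketch of that rewrite (operator names are stand-ins, not the real qbtypes values):

```go
package main

import "fmt"

// invert mirrors applyNotCondition: a negative operator flips to its positive
// counterpart, and the caller wraps the whole condition in NOT.
func invert(op string) (bool, string) {
	switch op {
	case "!=":
		return true, "="
	case "NOT LIKE":
		return true, "LIKE"
	}
	return false, op
}

func main() {
	negated, op := invert("!=")
	cond := fmt.Sprintf("arrayExists(x -> x %s 'error', items)", op)
	if negated {
		cond = "NOT (" + cond + ")"
	}
	fmt.Println(cond) // NOT (arrayExists(x -> x = 'error', items))
}
```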
@@ -256,6 +136,200 @@ func (c *jsonConditionBuilder) recurseArrayHops(current *telemetrytypes.JSONAcce
|
||||
return sb.Or(branches...), nil
|
||||
}
|
||||
|
||||
// buildTerminalCondition creates the innermost condition.
|
||||
func (c *jsonConditionBuilder) buildTerminalCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
|
||||
if node.TerminalConfig.ElemType.IsArray {
|
||||
// Note: here applyNotCondition will return true only if; top level path is an array; and operator is a negative operator
|
||||
// Otherwise this code will be triggered by buildAccessNodeBranches; Where operator would've been already inverted if needed.
|
||||
yes, operator := applyNotCondition(operator)
|
||||
cond, err := c.buildTerminalArrayCondition(node, operator, value, sb)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if yes {
|
||||
return sb.Not(cond), nil
|
||||
}
|
||||
return cond, nil
|
||||
}
|
||||
|
||||
return c.buildPrimitiveTerminalCondition(node, operator, value, sb)
|
||||
}
|
||||
|
||||
func getEmptyValue(elemType telemetrytypes.JSONDataType) any {
|
||||
switch elemType {
|
||||
case telemetrytypes.String:
|
||||
return ""
|
||||
case telemetrytypes.Int64, telemetrytypes.Float64, telemetrytypes.Bool:
|
||||
return 0
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *jsonConditionBuilder) terminalIndexedCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
	fieldPath := node.FieldPath()
	if strings.Contains(fieldPath, telemetrytypes.ArraySepSuffix) {
		return "", errors.NewInternalf(CodeArrayNavigationFailed, "cannot build index condition for array field %s", fieldPath)
	}

	elemType := node.TerminalConfig.ElemType
	dynamicExpr := fmt.Sprintf("dynamicElement(%s, '%s')", fieldPath, elemType.StringValue())
	indexedExpr := assumeNotNull(dynamicExpr)

	// switch the operator and value for EXISTS and NOT EXISTS
	switch operator {
	case qbtypes.FilterOperatorExists:
		operator = qbtypes.FilterOperatorNotEqual
		value = getEmptyValue(elemType)
	case qbtypes.FilterOperatorNotExists:
		operator = qbtypes.FilterOperatorEqual
		value = getEmptyValue(elemType)
	default:
		// do nothing
	}

	indexedExpr, formattedValue := querybuilder.DataTypeCollisionHandledFieldName(node.TerminalConfig.Key, value, indexedExpr, operator)
	cond, err := c.applyOperator(sb, indexedExpr, operator, formattedValue)
	if err != nil {
		return "", err
	}

	return cond, nil
}

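For intuition, this is roughly the expression shape terminalIndexedCondition produces for an indexed String path; in the real code the path, column name, and final value binding all go through the access plan and sqlbuilder. A sketch under those assumptions:

package main

import "fmt"

func main() {
	// Illustrative only: shape of the expression terminalIndexedCondition
	// builds for an indexed String path such as body.user.name.
	fieldPath := "body.user.name"
	dynamicExpr := fmt.Sprintf("dynamicElement(%s, '%s')", fieldPath, "String")
	indexedExpr := fmt.Sprintf("assumeNotNull(%s)", dynamicExpr)

	// An EXISTS filter is rewritten to "!= empty value" so that the
	// skip index can still be used.
	fmt.Printf("%s != ''\n", indexedExpr)
	// Output: assumeNotNull(dynamicElement(body.user.name, 'String')) != ''
}
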
// buildPrimitiveTerminalCondition builds the condition when the terminal node is a primitive type.
// It handles data type collisions and utilizes indexes for the condition if available.
func (c *jsonConditionBuilder) buildPrimitiveTerminalCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
	fieldPath := node.FieldPath()
	conditions := []string{}

	// utilize indexes for the condition if available
	//
	// Note: indexing code doesn't get executed for array-nested fields because they cannot be indexed
	indexed := slices.ContainsFunc(node.TerminalConfig.Key.Indexes, func(index telemetrytypes.JSONDataTypeIndex) bool {
		return index.Type == node.TerminalConfig.ElemType
	})
	if node.TerminalConfig.ElemType.IndexSupported && indexed {
		indexCond, err := c.terminalIndexedCondition(node, operator, value, sb)
		if err != nil {
			return "", err
		}
		// if qb has a definitive value, we can skip adding a condition to
		// check the existence of the path in the json column
		if value != nil && value != getEmptyValue(node.TerminalConfig.ElemType) {
			return indexCond, nil
		}

		conditions = append(conditions, indexCond)

		// Switch the operator to EXISTS unless it is NOT EXISTS: indexed
		// paths are wrapped in assumeNotNull, so they always carry a
		// default value; we flip the operator to EXISTS and filter down
		// to the rows that actually have the value.
		if operator != qbtypes.FilterOperatorNotExists {
			operator = qbtypes.FilterOperatorExists
		}
	}

	var formattedValue = value
	if operator.IsStringSearchOperator() {
		formattedValue = querybuilder.FormatValueForContains(value)
	}

	fieldExpr := fmt.Sprintf("dynamicElement(%s, '%s')", fieldPath, node.TerminalConfig.ElemType.StringValue())

	// if the operator is negative and involves a value comparison (i.e. excluding EXISTS and NOT EXISTS), we need to assume the field exists everywhere
	//
	// Note: applyNotCondition returns true here only if the top-level path is being queried and the operator is a negative operator.
	// Otherwise this code is reached via buildAccessNodeBranches, where the operator has already been inverted if needed.
	if node.IsNonNestedPath() {
		yes, _ := applyNotCondition(operator)
		if yes {
			switch operator {
			case qbtypes.FilterOperatorNotExists:
				// skip
			default:
				fieldExpr = assumeNotNull(fieldExpr)
			}
		}
	}

	fieldExpr, formattedValue = querybuilder.DataTypeCollisionHandledFieldName(node.TerminalConfig.Key, formattedValue, fieldExpr, operator)
	cond, err := c.applyOperator(sb, fieldExpr, operator, formattedValue)
	if err != nil {
		return "", err
	}
	conditions = append(conditions, cond)
	if len(conditions) > 1 {
		return sb.And(conditions...), nil
	}
	return conditions[0], nil
}

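To make the index interplay concrete: when the queried value can coincide with the type's default (empty string, zero), the indexed comparison alone is ambiguous, so the builder ANDs it with a path-existence condition. A rough sketch of the combined shape; the existence check shown here (JSONHas) is a hypothetical stand-in, since the real one comes from the EXISTS branch of applyOperator:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Indexed comparison against the String default ("").
	indexed := "assumeNotNull(dynamicElement(body.user.name, 'String')) = ''"
	// Hypothetical existence check, for illustration only.
	exists := "JSONHas(body, 'user.name')"
	conditions := []string{indexed, exists}
	fmt.Println("(" + strings.Join(conditions, ") AND (") + ")")
}
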
func (c *jsonConditionBuilder) buildTerminalArrayCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
	conditions := []string{}
	// if the operator is a string search operator, we need to build one more string comparison condition alongside the strict match condition
	if operator.IsStringSearchOperator() {
		formattedValue := querybuilder.FormatValueForContains(value)
		arrayCond, err := c.buildArrayMembershipCondition(node, operator, formattedValue, sb)
		if err != nil {
			return "", err
		}
		conditions = append(conditions, arrayCond)

		// switch operator for array membership checks
		switch operator {
		case qbtypes.FilterOperatorContains:
			operator = qbtypes.FilterOperatorEqual
		case qbtypes.FilterOperatorNotContains:
			operator = qbtypes.FilterOperatorNotEqual
		}
	}

	arrayCond, err := c.buildArrayMembershipCondition(node, operator, value, sb)
	if err != nil {
		return "", err
	}
	conditions = append(conditions, arrayCond)
	if len(conditions) > 1 {
		return sb.Or(conditions...), nil
	}

	return conditions[0], nil
}

// buildArrayMembershipCondition builds the condition for the point where arrays become primitive-typed arrays,
// e.g. [300, 404, 500], so that value operations work on the array elements.
func (c *jsonConditionBuilder) buildArrayMembershipCondition(node *telemetrytypes.JSONAccessNode, operator qbtypes.FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error) {
	arrayPath := node.FieldPath()
	// create a typed array out of a dynamic array
	filteredDynamicExpr := func() string {
		baseArrayDynamicExpr := fmt.Sprintf("dynamicElement(%s, 'Array(Dynamic)')", arrayPath)
		return fmt.Sprintf("arrayFilter(x->(dynamicType(x) IN ('String', 'Int64', 'Float64', 'Bool')), %s)",
			baseArrayDynamicExpr)
	}
	typedArrayExpr := func() string {
		return fmt.Sprintf("dynamicElement(%s, '%s')", arrayPath, node.TerminalConfig.ElemType.StringValue())
	}

	var arrayExpr string
	if node.TerminalConfig.ElemType == telemetrytypes.ArrayDynamic {
		arrayExpr = filteredDynamicExpr()
	} else {
		arrayExpr = typedArrayExpr()
	}

	key := "x"
	fieldExpr, value := querybuilder.DataTypeCollisionHandledFieldName(node.TerminalConfig.Key, value, key, operator)
	op, err := c.applyOperator(sb, fieldExpr, operator, value)
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("arrayExists(%s -> %s, %s)", key, op, arrayExpr), nil
}

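So for an Array(Dynamic) path, the final membership check nests arrayFilter inside arrayExists. A sketch of the SQL shape for a path like ids from the test set (illustrative; the real builder binds the comparison value through sqlbuilder):

package main

import "fmt"

func main() {
	arrayPath := "body.ids" // an Array(Dynamic) path, as in the test type set
	base := fmt.Sprintf("dynamicElement(%s, 'Array(Dynamic)')", arrayPath)
	filtered := fmt.Sprintf("arrayFilter(x->(dynamicType(x) IN ('String', 'Int64', 'Float64', 'Bool')), %s)", base)
	cond := "x = ?" // placeholder the sqlbuilder would bind the value to
	fmt.Printf("arrayExists(x -> %s, %s)\n", cond, filtered)
}
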
func (c *jsonConditionBuilder) applyOperator(sb *sqlbuilder.SelectBuilder, fieldExpr string, operator qbtypes.FilterOperator, value any) (string, error) {
	switch operator {
	case qbtypes.FilterOperatorEqual:
@@ -317,6 +391,6 @@ func (c *jsonConditionBuilder) applyOperator(sb *sqlbuilder.SelectBuilder, field
	}
}

-func assumeNotNull(column string, elemType telemetrytypes.JSONDataType) string {
-	return fmt.Sprintf("assumeNotNull(dynamicElement(%s, '%s'))", column, elemType.StringValue())
+func assumeNotNull(fieldExpr string) string {
+	return fmt.Sprintf("assumeNotNull(%s)", fieldExpr)
 }

@@ -1,10 +1,8 @@
package cloudintegrationtypes

import (
	"encoding/json"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -35,6 +33,8 @@ type GettableAccounts struct {
	Accounts []*Account `json:"accounts" required:"true" nullable:"false"`
}

type GettableAccount = Account

type UpdatableAccount struct {
	Config *AccountConfig `json:"config" required:"true" nullable:"false"`
}
@@ -42,169 +42,3 @@ type UpdatableAccount struct {
type AWSAccountConfig struct {
	Regions []string `json:"regions" required:"true" nullable:"false"`
}

func NewAccount(orgID valuer.UUID, provider CloudProviderType, config *AccountConfig) *Account {
	return &Account{
		Identifiable: types.Identifiable{
			ID: valuer.GenerateUUID(),
		},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		OrgID: orgID,
		Provider: provider,
		Config: config,
	}
}

func NewAccountFromStorable(storableAccount *StorableCloudIntegration) (*Account, error) {
	// config cannot be empty
	if storableAccount.Config == "" {
		return nil, errors.NewInternalf(errors.CodeInternal, "config is empty for account with id: %s", storableAccount.ID)
	}

	account := &Account{
		Identifiable: storableAccount.Identifiable,
		TimeAuditable: storableAccount.TimeAuditable,
		ProviderAccountID: storableAccount.AccountID,
		Provider: storableAccount.Provider,
		RemovedAt: storableAccount.RemovedAt,
		OrgID: storableAccount.OrgID,
		Config: new(AccountConfig),
	}

	switch storableAccount.Provider {
	case CloudProviderTypeAWS:
		awsConfig := new(AWSAccountConfig)
		err := json.Unmarshal([]byte(storableAccount.Config), awsConfig)
		if err != nil {
			return nil, err
		}
		account.Config.AWS = awsConfig
	}

	if storableAccount.LastAgentReport != nil {
		account.AgentReport = &AgentReport{
			TimestampMillis: storableAccount.LastAgentReport.TimestampMillis,
			Data: storableAccount.LastAgentReport.Data,
		}
	}

	return account, nil
}

func NewAccountsFromStorables(storableAccounts []*StorableCloudIntegration) ([]*Account, error) {
	accounts := make([]*Account, 0, len(storableAccounts))
	for _, storableAccount := range storableAccounts {
		account, err := NewAccountFromStorable(storableAccount)
		if err != nil {
			return nil, err
		}
		accounts = append(accounts, account)
	}

	return accounts, nil
}

func (account *Account) Update(config *AccountConfig) error {
	if account.RemovedAt != nil {
		return errors.New(errors.TypeUnsupported, ErrCodeCloudIntegrationRemoved, "this operation is not supported for a removed cloud integration account")
	}
	account.Config = config
	account.UpdatedAt = time.Now()
	return nil
}

func (account *Account) IsRemoved() bool {
	return account.RemovedAt != nil
}

func NewAccountConfigFromPostableArtifact(provider CloudProviderType, artifact *PostableConnectionArtifact) (*AccountConfig, error) {
	switch provider {
	case CloudProviderTypeAWS:
		if artifact.Config.Aws == nil {
			return nil, errors.NewInternalf(errors.CodeInternal, "AWS artifact is nil")
		}
		return &AccountConfig{
			AWS: &AWSAccountConfig{
				Regions: artifact.Config.Aws.Regions,
			},
		}, nil
	}

	return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}

func NewArtifactRequestFromPostableArtifact(provider CloudProviderType, artifact *PostableConnectionArtifact) (*ConnectionArtifactRequest, error) {
	req := &ConnectionArtifactRequest{
		Credentials: artifact.Credentials,
	}

	switch provider {
	case CloudProviderTypeAWS:
		if artifact.Config.Aws == nil {
			return nil, errors.NewInternalf(errors.CodeInternal, "AWS artifact is nil")
		}
		req.Config = &ConnectionArtifactRequestConfig{
			Aws: &AWSConnectionArtifactRequest{
				DeploymentRegion: artifact.Config.Aws.DeploymentRegion,
				Regions: artifact.Config.Aws.Regions,
			},
		}

		return req, nil
	}

	return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}

func NewAgentReport(data map[string]any) *AgentReport {
	return &AgentReport{
		TimestampMillis: time.Now().UnixMilli(),
		Data: data,
	}
}

// ToJSON returns the JSON bytes for the provider's config.
// It is deliberately not named MarshalJSON(), as that would interfere with the default JSON marshalling of the AccountConfig struct.
// NOTE: this uses the first non-nil provider config.
func (config *AccountConfig) ToJSON() ([]byte, error) {
	if config.AWS != nil {
		return json.Marshal(config.AWS)
	}

	return nil, errors.NewInternalf(errors.CodeInternal, "no provider account config found")
}

func (updatable *UpdatableAccount) Validate(provider CloudProviderType) error {
	if updatable.Config == nil {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"config is required")
	}

	switch provider {
	case CloudProviderTypeAWS:
		if updatable.Config.AWS == nil {
			return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
				"aws configuration is required")
		}

		if len(updatable.Config.AWS.Regions) == 0 {
			return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
				"at least one region is required")
		}

		for _, region := range updatable.Config.AWS.Regions {
			if _, ok := ValidAWSRegions[region]; !ok {
				return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidCloudRegion,
					"invalid AWS region: %s", region)
			}
		}
	default:
		return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput,
			"invalid cloud provider: %s", provider.StringValue())
	}

	return nil
}

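The config column therefore stores exactly the provider block's JSON. A minimal round-trip sketch of the AWS shape that ToJSON writes and NewAccountFromStorable parses back (stand-in struct mirroring AWSAccountConfig):

package main

import (
	"encoding/json"
	"fmt"
)

// awsAccountConfig is a stand-in for AWSAccountConfig, for illustration.
type awsAccountConfig struct {
	Regions []string `json:"regions"`
}

func main() {
	b, _ := json.Marshal(awsAccountConfig{Regions: []string{"us-east-1", "eu-west-1"}})
	fmt.Println(string(b)) // {"regions":["us-east-1","eu-west-1"]}

	var back awsAccountConfig
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.Regions) // [us-east-1 eu-west-1]
}
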
@@ -13,16 +13,10 @@ import (
)

var (
	ErrCodeCloudIntegrationInvalidConfig = errors.MustNewCode("cloud_integration_invalid_config")
	ErrCodeUnsupported = errors.MustNewCode("cloud_integration_unsupported")
	ErrCodeCloudIntegrationNotFound = errors.MustNewCode("cloud_integration_not_found")
	ErrCodeCloudIntegrationAlreadyExists = errors.MustNewCode("cloud_integration_already_exists")
	ErrCodeCloudIntegrationAlreadyConnected = errors.MustNewCode("cloud_integration_already_connected")
	ErrCodeCloudIntegrationRemoved = errors.MustNewCode("cloud_integration_removed")
	ErrCodeInvalidInput = errors.MustNewCode("cloud_integration_invalid_input")
	ErrCodeCloudIntegrationServiceNotFound = errors.MustNewCode("cloud_integration_service_not_found")
	ErrCodeCloudIntegrationServiceAlreadyExists = errors.MustNewCode("cloud_integration_service_already_exists")
	ErrCodeServiceDefinitionNotFound = errors.MustNewCode("service_definition_not_found")
)

// StorableCloudIntegration represents a cloud integration stored in the database.
@@ -58,26 +52,6 @@ type StorableCloudIntegrationService struct {
	CloudIntegrationID valuer.UUID `bun:"cloud_integration_id,type:text"`
}

// The following service config types are used only internally to store service config in the DB; they use snake_case JSON keys for backward compatibility.

type StorableServiceConfig struct {
	AWS *StorableAWSServiceConfig
}

type StorableAWSServiceConfig struct {
	Logs *StorableAWSLogsServiceConfig `json:"logs,omitempty"`
	Metrics *StorableAWSMetricsServiceConfig `json:"metrics,omitempty"`
}

type StorableAWSLogsServiceConfig struct {
	Enabled bool `json:"enabled"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // region -> list of buckets in that region
}

type StorableAWSMetricsServiceConfig struct {
	Enabled bool `json:"enabled"`
}

// Scan scans value from DB.
func (r *StorableAgentReport) Scan(src any) error {
	var data []byte
@@ -94,6 +68,10 @@ func (r *StorableAgentReport) Scan(src any) error {

// Value creates value to be stored in DB.
func (r *StorableAgentReport) Value() (driver.Value, error) {
	if r == nil {
		return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
	}

	serialized, err := json.Marshal(r)
	if err != nil {
		return nil, errors.WrapInternalf(
@@ -103,107 +81,3 @@ func (r *StorableAgentReport) Value() (driver.Value, error) {
	// Return as string instead of []byte to ensure PostgreSQL stores it as text, not bytes
	return string(serialized), nil
}

func NewStorableCloudIntegration(account *Account) (*StorableCloudIntegration, error) {
	configBytes, err := account.Config.ToJSON()
	if err != nil {
		return nil, err
	}

	storableAccount := &StorableCloudIntegration{
		Identifiable: account.Identifiable,
		TimeAuditable: account.TimeAuditable,
		Provider: account.Provider,
		Config: string(configBytes),
		AccountID: account.ProviderAccountID,
		OrgID: account.OrgID,
		RemovedAt: account.RemovedAt,
	}

	if account.AgentReport != nil {
		storableAccount.LastAgentReport = &StorableAgentReport{
			TimestampMillis: account.AgentReport.TimestampMillis,
			Data: account.AgentReport.Data,
		}
	}

	return storableAccount, nil
}

// NewStorableCloudIntegrationService creates a new StorableCloudIntegrationService with
// generated ID and timestamps from a CloudIntegrationService and its serialized config JSON.
func NewStorableCloudIntegrationService(svc *CloudIntegrationService, configJSON string) *StorableCloudIntegrationService {
	return &StorableCloudIntegrationService{
		Identifiable: svc.Identifiable,
		TimeAuditable: svc.TimeAuditable,
		Type: svc.Type,
		Config: configJSON,
		CloudIntegrationID: svc.CloudIntegrationID,
	}
}

func (account *StorableCloudIntegration) Update(providerAccountID *string, agentReport *AgentReport) {
	account.AccountID = providerAccountID
	if agentReport != nil {
		account.LastAgentReport = &StorableAgentReport{
			TimestampMillis: agentReport.TimestampMillis,
			Data: agentReport.Data,
		}
	}
}

// The following StorableServiceConfig-related functions are helpers to convert between a JSON string and the ServiceConfig domain struct.
func newStorableServiceConfig(provider CloudProviderType, serviceID ServiceID, serviceConfig *ServiceConfig, supportedSignals *SupportedSignals) *StorableServiceConfig {
	switch provider {
	case CloudProviderTypeAWS:
		storableAWSServiceConfig := new(StorableAWSServiceConfig)

		if supportedSignals.Logs {
			storableAWSServiceConfig.Logs = &StorableAWSLogsServiceConfig{
				Enabled: serviceConfig.AWS.Logs.Enabled,
			}

			if serviceID == AWSServiceS3Sync {
				storableAWSServiceConfig.Logs.S3Buckets = serviceConfig.AWS.Logs.S3Buckets
			}
		}

		if supportedSignals.Metrics {
			storableAWSServiceConfig.Metrics = &StorableAWSMetricsServiceConfig{
				Enabled: serviceConfig.AWS.Metrics.Enabled,
			}
		}

		return &StorableServiceConfig{AWS: storableAWSServiceConfig}
	default:
		return nil
	}
}

func newStorableServiceConfigFromJSON(provider CloudProviderType, jsonStr string) (*StorableServiceConfig, error) {
	switch provider {
	case CloudProviderTypeAWS:
		awsConfig := new(StorableAWSServiceConfig)
		err := json.Unmarshal([]byte(jsonStr), awsConfig)
		if err != nil {
			return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't parse AWS service config JSON")
		}
		return &StorableServiceConfig{AWS: awsConfig}, nil
	default:
		return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
	}
}

func (config *StorableServiceConfig) toJSON(provider CloudProviderType) ([]byte, error) {
	switch provider {
	case CloudProviderTypeAWS:
		jsonBytes, err := json.Marshal(config.AWS)
		if err != nil {
			return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't serialize AWS service config to JSON")
		}

		return jsonBytes, nil
	default:
		return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
	}
}

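Serialized through these storable types, a service config lands in the DB with snake_case keys. A small sketch of the persisted shape (stand-in structs mirroring the StorableAWS* types above):

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for StorableAWSLogsServiceConfig / StorableAWSServiceConfig,
// showing the snake_case JSON persisted in the config column.
type logsCfg struct {
	Enabled   bool                `json:"enabled"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

type svcCfg struct {
	Logs    *logsCfg `json:"logs,omitempty"`
	Metrics *struct {
		Enabled bool `json:"enabled"`
	} `json:"metrics,omitempty"`
}

func main() {
	b, _ := json.Marshal(svcCfg{Logs: &logsCfg{
		Enabled:   true,
		S3Buckets: map[string][]string{"us-east-1": {"my-logs-bucket"}},
	}})
	fmt.Println(string(b))
	// {"logs":{"enabled":true,"s3_buckets":{"us-east-1":["my-logs-bucket"]}}}
}
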
@@ -1,8 +1,6 @@
package cloudintegrationtypes

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -16,14 +14,10 @@ var (
	CloudProviderTypeAzure = CloudProviderType{valuer.NewString("azure")}

	// errors.
	ErrCodeCloudProviderInvalidInput = errors.MustNewCode("cloud_integration_invalid_cloud_provider")
	ErrCodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")

	AWSIntegrationUserEmail = valuer.MustNewEmail("aws-integration@signoz.io")
	AzureIntegrationUserEmail = valuer.MustNewEmail("azure-integration@signoz.io")

	CloudFormationQuickCreateBaseURL = valuer.NewString("https://%s.console.aws.amazon.com/cloudformation/home")
	AgentCloudFormationTemplateS3Path = valuer.NewString("https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json")
	AgentCloudFormationBaseStackName = valuer.NewString("signoz-integration")
)

// CloudIntegrationUserEmails is the list of valid emails for Cloud One Click integrations.
@@ -45,18 +39,3 @@ func NewCloudProvider(provider string) (CloudProviderType, error) {
		return CloudProviderType{}, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
	}
}

func GetCloudProviderEmail(provider CloudProviderType) (valuer.Email, error) {
	switch provider {
	case CloudProviderTypeAWS:
		return AWSIntegrationUserEmail, nil
	case CloudProviderTypeAzure:
		return AzureIntegrationUserEmail, nil
	default:
		return valuer.Email{}, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
	}
}

func NewIngestionKeyName(provider CloudProviderType) string {
	return fmt.Sprintf("%s-integration", provider.StringValue())
}

@@ -3,26 +3,12 @@ package cloudintegrationtypes
import (
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type SignozCredentials struct {
	SigNozAPIURL string `json:"sigNozApiURL" required:"true"`
	SigNozAPIKey string `json:"sigNozApiKey" required:"true"` // PAT
	IngestionURL string `json:"ingestionUrl" required:"true"`
	IngestionKey string `json:"ingestionKey" required:"true"`
}

type ConnectionArtifactRequest struct {
	Config *ConnectionArtifactRequestConfig `json:"config" required:"true"`
	Credentials *SignozCredentials `json:"credentials" required:"true"`
}

type ConnectionArtifactRequestConfig struct {
	// as the agent version is common to all providers, we can keep it at the top level of this struct
	AgentVersion string
	Aws *AWSConnectionArtifactRequest `json:"aws" required:"true" nullable:"false"`
	// required till new providers are added
	Aws *AWSConnectionArtifactRequest `json:"aws" required:"true" nullable:"false"`
}

type AWSConnectionArtifactRequest struct {
@@ -47,8 +33,8 @@ type GettableAccountWithArtifact struct {
}

type AgentCheckInRequest struct {
	ProviderAccountID string `json:"providerAccountId" required:"false"`
	CloudIntegrationID valuer.UUID `json:"cloudIntegrationId" required:"false"`
	ProviderAccountID string `json:"providerAccountId" required:"false"`
	CloudIntegrationID string `json:"cloudIntegrationId" required:"false"`

	Data map[string]any `json:"data" required:"true" nullable:"true"`
}
@@ -81,8 +67,8 @@ type GettableAgentCheckInResponse struct {

// IntegrationConfig is the older integration config struct, kept for backward compatibility;
// it will eventually be removed once agents are updated to use the new struct.
type IntegrationConfig struct {
	EnabledRegions []string `json:"enabled_regions" required:"true" nullable:"false"` // backward compatible
	Telemetry *OldAWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"` // backward compatible
	EnabledRegions []string `json:"enabled_regions" required:"true" nullable:"false"` // backward compatible
	Telemetry *AWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"` // backward compatible
}

type ProviderIntegrationConfig struct {
@@ -93,110 +79,3 @@ type AWSIntegrationConfig struct {
	EnabledRegions []string `json:"enabledRegions" required:"true" nullable:"false"`
	Telemetry *AWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"`
}

// NewGettableAgentCheckInResponse constructs a backward-compatible response from an AgentCheckInResponse.
// It populates the old snake_case fields (account_id, cloud_account_id, integration_config, removed_at)
// from the new camelCase fields so older agents continue to work unchanged.
// The provider parameter controls which provider-specific block is mapped into the legacy integration_config.
func NewGettableAgentCheckInResponse(provider CloudProviderType, resp *AgentCheckInResponse) *GettableAgentCheckInResponse {
	gettable := &GettableAgentCheckInResponse{
		AccountID: resp.CloudIntegrationID,
		CloudAccountID: resp.ProviderAccountID,
		OlderRemovedAt: resp.RemovedAt,
		AgentCheckInResponse: *resp,
	}

	switch provider {
	case CloudProviderTypeAWS:
		gettable.OlderIntegrationConfig = awsOlderIntegrationConfig(resp.IntegrationConfig)
	}

	return gettable
}

// Validate checks that the connection artifact request has a valid provider-specific block
// with non-empty, valid regions and a valid deployment region.
func (req *ConnectionArtifactRequest) Validate(provider CloudProviderType) error {
	if req.Config == nil || req.Credentials == nil {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"config and credentials are required")
	}

	if req.Credentials.SigNozAPIURL == "" {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"sigNozApiURL can not be empty")
	}

	if req.Credentials.SigNozAPIKey == "" {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"sigNozApiKey can not be empty")
	}

	if req.Credentials.IngestionURL == "" {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"ingestionUrl can not be empty")
	}

	if req.Credentials.IngestionKey == "" {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"ingestionKey can not be empty")
	}

	switch provider {
	case CloudProviderTypeAWS:
		if req.Config.Aws == nil {
			return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
				"aws configuration is required")
		}
		return req.Config.Aws.Validate()
	}

	return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput,
		"invalid cloud provider: %s", provider)
}

// Validate checks that the AWS connection artifact request has a valid deployment region
// and a non-empty list of valid regions.
func (req *AWSConnectionArtifactRequest) Validate() error {
	if req.DeploymentRegion == "" {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"deploymentRegion is required")
	}
	if _, ok := ValidAWSRegions[req.DeploymentRegion]; !ok {
		return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidCloudRegion,
			"invalid deployment region: %s", req.DeploymentRegion)
	}

	if len(req.Regions) == 0 {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"at least one region is required")
	}
	for _, region := range req.Regions {
		if _, ok := ValidAWSRegions[region]; !ok {
			return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidCloudRegion,
				"invalid AWS region: %s", region)
		}
	}
	return nil
}

// Validate checks that the request uses either the old fields (account_id, cloud_account_id) or
// the new fields (cloudIntegrationId, providerAccountId), never a mix of both.
func (req *PostableAgentCheckInRequest) Validate() error {
	hasOldFields := req.ID != "" || req.AccountID != ""
	hasNewFields := !req.CloudIntegrationID.IsZero() || req.ProviderAccountID != ""

	if hasOldFields && hasNewFields {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"request must use either old fields (account_id, cloud_account_id) or new fields (cloudIntegrationId, providerAccountId), not both")
	}
	if !hasOldFields && !hasNewFields {
		return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
			"request must provide either old fields (account_id, cloud_account_id) or new fields (cloudIntegrationId, providerAccountId)")
	}
	return nil
}

func (config *ConnectionArtifactRequestConfig) AddAgentVersion(agentVersion string) {
	config.AgentVersion = agentVersion
}

@@ -4,7 +4,10 @@ import (
	"github.com/SigNoz/signoz/pkg/errors"
)

var ErrCodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
var (
	ErrCodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
	ErrCodeMismatchCloudProvider = errors.MustNewCode("cloud_provider_mismatch")
)

// List of all valid cloud regions on Amazon Web Services.
var ValidAWSRegions = map[string]struct{}{

@@ -2,7 +2,6 @@ package cloudintegrationtypes

import (
	"fmt"
	"strings"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
@@ -46,13 +45,13 @@ type GettableServicesMetadata struct {
	Services []*ServiceMetadata `json:"services" required:"true" nullable:"false"`
}

// Service represents a cloud integration service with its definition;
// CloudIntegrationService is non-nil only when the service entry exists in the DB with ANY config (enabled or disabled).
type Service struct {
	ServiceDefinition
	CloudIntegrationService *CloudIntegrationService `json:"cloudIntegrationService" required:"true" nullable:"true"`
	ServiceConfig *ServiceConfig `json:"serviceConfig" required:"false" nullable:"false"`
}

type GettableService = Service

type UpdatableService struct {
	Config *ServiceConfig `json:"config" required:"true" nullable:"false"`
}
@@ -61,7 +60,7 @@ type ServiceDefinition struct {
	ServiceDefinitionMetadata
	Overview string `json:"overview" required:"true"` // markdown
	Assets Assets `json:"assets" required:"true"`
	SupportedSignals SupportedSignals `json:"supportedSignals" required:"true"`
	SupportedSignals SupportedSignals `json:"supported_signals" required:"true"`
	DataCollected DataCollected `json:"dataCollected" required:"true"`
	Strategy *CollectionStrategy `json:"telemetryCollectionStrategy" required:"true" nullable:"false"`
}
@@ -93,7 +92,7 @@ type AWSServiceConfig struct {
// NOTE: the JSON keys are snake case for backward compatibility with existing agents.
type AWSServiceLogsConfig struct {
	Enabled bool `json:"enabled"`
	S3Buckets map[string][]string `json:"s3Buckets,omitempty"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

type AWSServiceMetricsConfig struct {
@@ -122,38 +121,19 @@ type CollectedMetric struct {
}

// AWSCollectionStrategy represents the signal collection strategy for AWS services.
// This is AWS specific.
// NOTE: this structure still uses snake case for backward compatibility
// with existing agents.
type AWSCollectionStrategy struct {
	Metrics *AWSMetricsStrategy `json:"metrics,omitempty"`
	Logs *AWSLogsStrategy `json:"logs,omitempty"`
	S3Buckets map[string][]string `json:"s3Buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}

// OldAWSCollectionStrategy is the backward-compatible snake_case form of AWSCollectionStrategy,
// used in the legacy integration_config response field for older agents.
type OldAWSCollectionStrategy struct {
	Provider string `json:"provider"`
	Metrics *OldAWSMetricsStrategy `json:"aws_metrics,omitempty"`
	Logs *OldAWSLogsStrategy `json:"aws_logs,omitempty"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

// OldAWSMetricsStrategy is the snake_case form of AWSMetricsStrategy for older agents.
type OldAWSMetricsStrategy struct {
	StreamFilters []struct {
		Namespace string `json:"Namespace"`
		MetricNames []string `json:"MetricNames,omitempty"`
	} `json:"cloudwatch_metric_stream_filters"`
}

// OldAWSLogsStrategy is the snake_case form of AWSLogsStrategy for older agents.
type OldAWSLogsStrategy struct {
	Subscriptions []struct {
		LogGroupNamePrefix string `json:"log_group_name_prefix"`
		FilterPattern string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
	Metrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
	Logs *AWSLogsStrategy `json:"aws_logs,omitempty"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}

// AWSMetricsStrategy represents the metrics collection strategy for AWS services.
// This is AWS specific.
// NOTE: this structure still uses snake case for backward compatibility
// with existing agents.
type AWSMetricsStrategy struct {
	// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
	StreamFilters []struct {
@@ -161,20 +141,23 @@ type AWSMetricsStrategy struct {
		// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
		Namespace string `json:"Namespace"`
		MetricNames []string `json:"MetricNames,omitempty"`
	} `json:"cloudwatchMetricStreamFilters"`
	} `json:"cloudwatch_metric_stream_filters"`
}

// AWSLogsStrategy represents the logs collection strategy for AWS services.
// This is AWS specific.
// NOTE: this structure still uses snake case for backward compatibility
// with existing agents.
type AWSLogsStrategy struct {
	Subscriptions []struct {
		// subscribe to all log groups with the specified prefix.
		// eg: `/aws/rds/`
		LogGroupNamePrefix string `json:"logGroupNamePrefix"`
		LogGroupNamePrefix string `json:"log_group_name_prefix"`

		// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
		// "" implies no filtering is required.
		FilterPattern string `json:"filterPattern"`
	} `json:"cloudwatchLogsSubscriptions"`
		FilterPattern string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
}

// Dashboard represents a dashboard definition for cloud integration.
@@ -187,156 +170,12 @@ type Dashboard struct {
	Definition dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}

func NewCloudIntegrationService(serviceID ServiceID, cloudIntegrationID valuer.UUID, config *ServiceConfig) *CloudIntegrationService {
	return &CloudIntegrationService{
		Identifiable: types.Identifiable{
			ID: valuer.GenerateUUID(),
		},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		Type: serviceID,
		Config: config,
		CloudIntegrationID: cloudIntegrationID,
	}
}

func NewCloudIntegrationServiceFromStorable(stored *StorableCloudIntegrationService, config *ServiceConfig) *CloudIntegrationService {
	return &CloudIntegrationService{
		Identifiable: stored.Identifiable,
		TimeAuditable: stored.TimeAuditable,
		Type: stored.Type,
		Config: config,
		CloudIntegrationID: stored.CloudIntegrationID,
	}
}

func NewServiceMetadata(definition ServiceDefinition, enabled bool) *ServiceMetadata {
	return &ServiceMetadata{
		ServiceDefinitionMetadata: definition.ServiceDefinitionMetadata,
		Enabled: enabled,
	}
}

func NewService(def ServiceDefinition, storableService *CloudIntegrationService) *Service {
	return &Service{
		ServiceDefinition: def,
		CloudIntegrationService: storableService,
	}
}

func NewServiceConfigFromJSON(provider CloudProviderType, jsonString string) (*ServiceConfig, error) {
	storableServiceConfig, err := newStorableServiceConfigFromJSON(provider, jsonString)
	if err != nil {
		return nil, err
	}

	switch provider {
	case CloudProviderTypeAWS:
		awsServiceConfig := new(AWSServiceConfig)

		if storableServiceConfig.AWS.Logs != nil {
			awsServiceConfig.Logs = &AWSServiceLogsConfig{
				Enabled: storableServiceConfig.AWS.Logs.Enabled,
				S3Buckets: storableServiceConfig.AWS.Logs.S3Buckets,
			}
		}

		if storableServiceConfig.AWS.Metrics != nil {
			awsServiceConfig.Metrics = &AWSServiceMetricsConfig{
				Enabled: storableServiceConfig.AWS.Metrics.Enabled,
			}
		}

		return &ServiceConfig{AWS: awsServiceConfig}, nil
	default:
		return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
	}
}

// Update sets the service config.
func (service *CloudIntegrationService) Update(config *ServiceConfig) {
	service.Config = config
	service.UpdatedAt = time.Now()
}

// IsServiceEnabled returns true if the service has at least one signal (logs or metrics) enabled
// for the given cloud provider.
func IsServiceEnabled(provider CloudProviderType, config *ServiceConfig) bool {
	switch provider {
	case CloudProviderTypeAWS:
		logsEnabled := config.AWS.Logs != nil && config.AWS.Logs.Enabled
		metricsEnabled := config.AWS.Metrics != nil && config.AWS.Metrics.Enabled
		return logsEnabled || metricsEnabled
	default:
		return false
	}
}

// IsMetricsEnabled returns true if metrics are explicitly enabled for the given cloud provider.
// Used to gate dashboard availability — dashboards are only shown when metrics are enabled.
func IsMetricsEnabled(provider CloudProviderType, config *ServiceConfig) bool {
	switch provider {
	case CloudProviderTypeAWS:
		return config.AWS.Metrics != nil && config.AWS.Metrics.Enabled
	default:
		return false
	}
}

// IsLogsEnabled returns true if logs are explicitly enabled for the given cloud provider.
func IsLogsEnabled(provider CloudProviderType, config *ServiceConfig) bool {
	switch provider {
	case CloudProviderTypeAWS:
		return config.AWS.Logs != nil && config.AWS.Logs.Enabled
	default:
		return false
	}
}

func (serviceConfig *ServiceConfig) ToJSON(provider CloudProviderType, serviceID ServiceID, supportedSignals *SupportedSignals) ([]byte, error) {
	storableServiceConfig := newStorableServiceConfig(provider, serviceID, serviceConfig, supportedSignals)
	return storableServiceConfig.toJSON(provider)
}

func (updatableService *UpdatableService) Validate(provider CloudProviderType, serviceID ServiceID) error {
	switch provider {
	case CloudProviderTypeAWS:
		if updatableService.Config.AWS == nil {
			return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "AWS config is required for AWS service")
		}

		if serviceID == AWSServiceS3Sync {
			if updatableService.Config.AWS.Logs == nil || updatableService.Config.AWS.Logs.S3Buckets == nil {
				return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "AWS S3 Sync service requires S3 bucket configuration for logs")
			}
		}

		return nil
	default:
		return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
	}
}
// UTILS

// GetCloudIntegrationDashboardID returns the dashboard id for a cloud integration, given the cloud provider, service id, and dashboard id.
// This is used to generate unique dashboard ids for cloud integrations, and also to parse a dashboard id back into the cloud provider and service id when needed.
func GetCloudIntegrationDashboardID(cloudProvider CloudProviderType, svcID, dashboardID string) string {
	return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider.StringValue(), svcID, dashboardID)
}

// ParseCloudIntegrationDashboardID parses a dashboard id generated by GetCloudIntegrationDashboardID
// into its constituent parts (cloudProvider, serviceID, dashboardID).
func ParseCloudIntegrationDashboardID(id string) (CloudProviderType, string, string, error) {
	parts := strings.SplitN(id, "--", 4)
	if len(parts) != 4 || parts[0] != "cloud-integration" {
		return CloudProviderType{}, "", "", errors.New(errors.TypeNotFound, ErrCodeCloudIntegrationNotFound, "invalid cloud integration dashboard id")
	}
	provider, err := NewCloudProvider(parts[1])
	if err != nil {
		return CloudProviderType{}, "", "", err
	}
	return provider, parts[2], parts[3], nil
	return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcID, dashboardID)
}

// GetDashboardsFromAssets returns the list of dashboards for the cloud provider service from its definition.
@@ -350,9 +189,9 @@ func GetDashboardsFromAssets(
	dashboards := make([]*dashboardtypes.Dashboard, 0)

	for _, d := range assets.Dashboards {
		author := fmt.Sprintf("%s-integration", cloudProvider.StringValue())
		author := fmt.Sprintf("%s-integration", cloudProvider)
		dashboards = append(dashboards, &dashboardtypes.Dashboard{
			ID: d.ID,
			ID: GetCloudIntegrationDashboardID(cloudProvider, svcID, d.ID),
			Locked: true,
			OrgID: orgID,
			Data: d.Definition,
@@ -369,53 +208,3 @@

	return dashboards
}

// awsOlderIntegrationConfig converts a ProviderIntegrationConfig into the legacy snake_case
// IntegrationConfig format consumed by older AWS agents. Returns nil if AWS config is absent.
func awsOlderIntegrationConfig(cfg *ProviderIntegrationConfig) *IntegrationConfig {
	if cfg == nil || cfg.AWS == nil {
		return nil
	}
	awsCfg := cfg.AWS

	older := &IntegrationConfig{
		EnabledRegions: awsCfg.EnabledRegions,
	}

	if awsCfg.Telemetry == nil {
		return older
	}

	// Older agents expect a "provider" field and fully snake_case keys inside telemetry.
	oldTelemetry := &OldAWSCollectionStrategy{
		Provider: CloudProviderTypeAWS.StringValue(),
		S3Buckets: awsCfg.Telemetry.S3Buckets,
	}

	if awsCfg.Telemetry.Metrics != nil {
		// Convert camelCase cloudwatchMetricStreamFilters → snake_case cloudwatch_metric_stream_filters
		oldMetrics := &OldAWSMetricsStrategy{}
		for _, f := range awsCfg.Telemetry.Metrics.StreamFilters {
			oldMetrics.StreamFilters = append(oldMetrics.StreamFilters, struct {
				Namespace string `json:"Namespace"`
				MetricNames []string `json:"MetricNames,omitempty"`
			}{Namespace: f.Namespace, MetricNames: f.MetricNames})
		}
		oldTelemetry.Metrics = oldMetrics
	}

	if awsCfg.Telemetry.Logs != nil {
		// Convert camelCase cloudwatchLogsSubscriptions → snake_case cloudwatch_logs_subscriptions
		oldLogs := &OldAWSLogsStrategy{}
		for _, s := range awsCfg.Telemetry.Logs.Subscriptions {
			oldLogs.Subscriptions = append(oldLogs.Subscriptions, struct {
				LogGroupNamePrefix string `json:"log_group_name_prefix"`
				FilterPattern string `json:"filter_pattern"`
			}{LogGroupNamePrefix: s.LogGroupNamePrefix, FilterPattern: s.FilterPattern})
		}
		oldTelemetry.Logs = oldLogs
	}

	older.Telemetry = oldTelemetry
	return older
}

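The net effect of awsOlderIntegrationConfig is a telemetry block keyed the way pre-rename agents expect: a provider field plus aws_logs/aws_metrics with snake_case filter keys. A minimal sketch of that output shape (stand-in structs mirroring OldAWSCollectionStrategy above):

package main

import (
	"encoding/json"
	"fmt"
)

type oldLogs struct {
	Subscriptions []struct {
		LogGroupNamePrefix string `json:"log_group_name_prefix"`
		FilterPattern      string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
}

type oldTelemetry struct {
	Provider string   `json:"provider"`
	Logs     *oldLogs `json:"aws_logs,omitempty"`
}

func main() {
	t := oldTelemetry{Provider: "aws", Logs: &oldLogs{}}
	t.Logs.Subscriptions = append(t.Logs.Subscriptions, struct {
		LogGroupNamePrefix string `json:"log_group_name_prefix"`
		FilterPattern      string `json:"filter_pattern"`
	}{LogGroupNamePrefix: "/aws/rds/", FilterPattern: ""})
	b, _ := json.Marshal(t)
	fmt.Println(string(b))
	// {"provider":"aws","aws_logs":{"cloudwatch_logs_subscriptions":[{"log_group_name_prefix":"/aws/rds/","filter_pattern":""}]}}
}
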
@@ -38,8 +38,6 @@ type Store interface {

	// UpdateService updates an existing cloud integration service
	UpdateService(ctx context.Context, service *StorableCloudIntegrationService) error

	RunInTx(context.Context, func(ctx context.Context) error) error
}

type ServiceDefinitionStore interface {

@@ -113,6 +113,29 @@
	FilterOperatorNotContains
)

var operatorInverseMapping = map[FilterOperator]FilterOperator{
	FilterOperatorEqual: FilterOperatorNotEqual,
	FilterOperatorNotEqual: FilterOperatorEqual,
	FilterOperatorGreaterThan: FilterOperatorLessThanOrEq,
	FilterOperatorGreaterThanOrEq: FilterOperatorLessThan,
	FilterOperatorLessThan: FilterOperatorGreaterThanOrEq,
	FilterOperatorLessThanOrEq: FilterOperatorGreaterThan,
	FilterOperatorLike: FilterOperatorNotLike,
	FilterOperatorNotLike: FilterOperatorLike,
	FilterOperatorILike: FilterOperatorNotILike,
	FilterOperatorNotILike: FilterOperatorILike,
	FilterOperatorBetween: FilterOperatorNotBetween,
	FilterOperatorNotBetween: FilterOperatorBetween,
	FilterOperatorIn: FilterOperatorNotIn,
	FilterOperatorNotIn: FilterOperatorIn,
	FilterOperatorExists: FilterOperatorNotExists,
	FilterOperatorNotExists: FilterOperatorExists,
	FilterOperatorRegexp: FilterOperatorNotRegexp,
	FilterOperatorNotRegexp: FilterOperatorRegexp,
	FilterOperatorContains: FilterOperatorNotContains,
	FilterOperatorNotContains: FilterOperatorContains,
}

// AddDefaultExistsFilter returns true if an additional exists filter should be added to the query.
// For negative predicates, we don't want to add the exists filter. Why?
// Say, for example, a user adds a filter `service.name != "redis"`; we can't interpret it
@@ -162,6 +185,10 @@ func (f FilterOperator) IsNegativeOperator() bool {
	return true
}

func (f FilterOperator) Inverse() FilterOperator {
	return operatorInverseMapping[f]
}

func (f FilterOperator) IsComparisonOperator() bool {
	switch f {
	case FilterOperatorGreaterThan, FilterOperatorGreaterThanOrEq, FilterOperatorLessThan, FilterOperatorLessThanOrEq:

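Because the table pairs every operator with its complement, Inverse is an involution: applying it twice yields the original operator, which is what lets applyNotCondition safely strip negation and let the caller re-apply it as a NOT wrapper. A quick property check over a few representative pairs (sketch, not the real qbtypes table):

package main

import "fmt"

func main() {
	// Mirror of a few operatorInverseMapping pairs; the full table in the
	// diff pairs every operator with its complement.
	inverse := map[string]string{
		"=": "!=", "!=": "=",
		"in": "not_in", "not_in": "in",
		"exists": "not_exists", "not_exists": "exists",
	}
	// Involution property: Inverse(Inverse(op)) == op for every entry.
	for op, inv := range inverse {
		if inverse[inv] != op {
			fmt.Println("not an involution:", op)
			return
		}
	}
	fmt.Println("ok")
}
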
@@ -90,6 +90,11 @@ func (n *JSONAccessNode) FieldPath() string {
	return n.Parent.Alias() + "." + key
}

// IsNonNestedPath returns true if the current node is a non-nested path.
func (n *JSONAccessNode) IsNonNestedPath() bool {
	return !strings.Contains(n.FieldPath(), ArraySep)
}

func (n *JSONAccessNode) BranchesInOrder() []JSONAccessBranchType {
	return slices.SortedFunc(maps.Keys(n.Branches), func(a, b JSONAccessBranchType) int {
		return strings.Compare(b.StringValue(), a.StringValue())

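A non-nested path is one that never crosses an array hop, which is why the check reduces to a substring test on the rendered field path. A tiny sketch, assuming ArraySep is the "[]" hop marker used in paths like education[].name:

package main

import (
	"fmt"
	"strings"
)

// arraySep stands in for telemetrytypes.ArraySep in this sketch;
// the exact constant value is an assumption for illustration.
const arraySep = "[]"

func isNonNestedPath(fieldPath string) bool {
	return !strings.Contains(fieldPath, arraySep)
}

func main() {
	fmt.Println(isNonNestedPath("user.address.zip")) // true: plain object nesting
	fmt.Println(isNonNestedPath("education[].name")) // false: crosses an array hop
}
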
@@ -8,65 +8,101 @@ package telemetrytypes
// This represents the type information available in the test JSON structure.
func TestJSONTypeSet() (map[string][]JSONDataType, MetadataStore) {
	types := map[string][]JSONDataType{
		"user.name": {String},
		"user.permissions": {ArrayString},
		"user.age": {Int64, String},
		"user.height": {Float64},
		"education": {ArrayJSON},
		"education[].name": {String},
		"education[].type": {String, Int64},
		"education[].internal_type": {String},
		"education[].metadata.location": {String},
		"education[].parameters": {ArrayFloat64, ArrayDynamic},
		"education[].duration": {String},
		"education[].mode": {String},
		"education[].year": {Int64},
		"education[].field": {String},
		"education[].awards": {ArrayDynamic, ArrayJSON},
		"education[].awards[].name": {String},
		"education[].awards[].rank": {Int64},
		"education[].awards[].medal": {String},
		"education[].awards[].type": {String},
		"education[].awards[].semester": {Int64},
		"education[].awards[].participated": {ArrayDynamic, ArrayJSON},
		"education[].awards[].participated[].type": {String},
		"education[].awards[].participated[].field": {String},
		"education[].awards[].participated[].project_type": {String},
		"education[].awards[].participated[].project_name": {String},
		"education[].awards[].participated[].race_type": {String},
		"education[].awards[].participated[].team_based": {Bool},
		"education[].awards[].participated[].team_name": {String},
		"education[].awards[].participated[].team": {ArrayJSON},
		"education[].awards[].participated[].members": {ArrayString},
		"education[].awards[].participated[].team[].name": {String},
		"education[].awards[].participated[].team[].branch": {String},
		"education[].awards[].participated[].team[].semester": {Int64},
		"interests": {ArrayJSON},
		"interests[].type": {String},
		"interests[].entities": {ArrayJSON},
		"interests[].entities.application_date": {String},
		"interests[].entities[].reviews": {ArrayJSON},
		"interests[].entities[].reviews[].given_by": {String},
		"interests[].entities[].reviews[].remarks": {String},
		"interests[].entities[].reviews[].weight": {Float64},
		"interests[].entities[].reviews[].passed": {Bool},
		"interests[].entities[].reviews[].type": {String},
		"interests[].entities[].reviews[].analysis_type": {Int64},
		"interests[].entities[].reviews[].entries": {ArrayJSON},
		"interests[].entities[].reviews[].entries[].subject": {String},
		"interests[].entities[].reviews[].entries[].status": {String},
		"interests[].entities[].reviews[].entries[].metadata": {ArrayJSON},
		"interests[].entities[].reviews[].entries[].metadata[].company": {String},
		"interests[].entities[].reviews[].entries[].metadata[].experience": {Int64},
		"interests[].entities[].reviews[].entries[].metadata[].unit": {String},
		"interests[].entities[].reviews[].entries[].metadata[].positions": {ArrayJSON},
		"interests[].entities[].reviews[].entries[].metadata[].positions[].name": {String},
		"interests[].entities[].reviews[].entries[].metadata[].positions[].duration": {Int64, Float64},
		"interests[].entities[].reviews[].entries[].metadata[].positions[].unit": {String},
		"interests[].entities[].reviews[].entries[].metadata[].positions[].ratings": {ArrayInt64, ArrayString},
		"message": {String},
		"tags": {ArrayString},

		// ── user (primitives) ─────────────────────────────────────────────
		"user.name": {String},
		"user.permissions": {ArrayString},
		"user.age": {Int64, String}, // Int64/String ambiguity
		"user.height": {Float64},
		"user.active": {Bool}, // Bool — not IndexSupported

		// Deeper non-array nesting (a.b.c — no array hops)
		"user.address.zip": {Int64},

		// ── education[] ───────────────────────────────────────────────────
		// Pattern: x[].y
		"education": {ArrayJSON},
		"education[].name": {String},
		"education[].type": {String, Int64},
		"education[].year": {Int64},
		"education[].scores": {ArrayInt64},
		"education[].parameters": {ArrayFloat64, ArrayDynamic},

		// Pattern: x[].y[]
		"education[].awards": {ArrayDynamic, ArrayJSON},

		// Pattern: x[].y[].z
		"education[].awards[].name": {String},
		"education[].awards[].type": {String},
		"education[].awards[].semester": {Int64},

		// Pattern: x[].y[].z[]
		"education[].awards[].participated": {ArrayDynamic, ArrayJSON},

		// Pattern: x[].y[].z[].w
		"education[].awards[].participated[].members": {ArrayString},

		// Pattern: x[].y[].z[].w[]
		"education[].awards[].participated[].team": {ArrayJSON},

		// Pattern: x[].y[].z[].w[].v
		"education[].awards[].participated[].team[].branch": {String},

		// ── interests[] ───────────────────────────────────────────────────
		"interests": {ArrayJSON},
		"interests[].entities": {ArrayJSON},
		"interests[].entities[].reviews": {ArrayJSON},
		"interests[].entities[].reviews[].entries": {ArrayJSON},
		"interests[].entities[].reviews[].entries[].metadata": {ArrayJSON},
		"interests[].entities[].reviews[].entries[].metadata[].positions": {ArrayJSON},
		"interests[].entities[].reviews[].entries[].metadata[].positions[].name": {String},
		"interests[].entities[].reviews[].entries[].metadata[].positions[].ratings": {ArrayInt64, ArrayString},
		"http-events": {ArrayJSON},
		"http-events[].request-info.host": {String},
		"ids": {ArrayDynamic},

		// ── top-level primitives ──────────────────────────────────────────
		"message": {String},
		"http-status": {Int64, String}, // hyphen in root key, ambiguous

		// ── top-level nested objects (no array hops) ───────────────────────
		"response.time-taken": {Float64}, // hyphen inside nested key
	}

	return types, nil
}

// TestIndexedPathEntry is a path + JSON type pair representing a field
// backed by a ClickHouse skip index in the test data.
//
// Only non-array paths with IndexSupported types (String, Int64, Float64)
// are valid entries — arrays and Bool cannot carry a skip index.
//
// The ColumnExpression for each entry is computed at test-setup time from
// the access plan, since it depends on the column name (e.g. body_v2)
// which is unknown to this package.
type TestIndexedPathEntry struct {
	Path string
	Type JSONDataType
}

// TestIndexedPaths lists path+type pairs from TestJSONTypeSet that are
// backed by a JSON data type index. Test setup uses this to populate
// key.Indexes after calling SetJSONAccessPlan.
//
// Intentionally excluded:
//   - user.active → Bool, IndexSupported=false
var TestIndexedPaths = []TestIndexedPathEntry{
	// user primitives
	{Path: "user.name", Type: String},

	// user.address — deeper non-array nesting
	{Path: "user.address.zip", Type: Int64},

	// root-level with special characters
	{Path: "http-status", Type: Int64},
	{Path: "http-status", Type: String},

	// root-level nested objects (no array hops)
	{Path: "response.time-taken", Type: Float64},
}

@@ -22,6 +22,7 @@ pytest_plugins = [
    "fixtures.notification_channel",
    "fixtures.alerts",
    "fixtures.cloudintegrations",
    "fixtures.jsontypeexporter",
]
421 tests/integration/fixtures/jsontypeexporter.py (Normal file)
@@ -0,0 +1,421 @@
"""
|
||||
Simpler version of jsontypeexporter for test fixtures.
|
||||
This exports JSON type metadata to the path_types table by parsing JSON bodies
|
||||
and extracting all paths with their types, similar to how the real jsontypeexporter works.
|
||||
"""
|
||||
import datetime
|
||||
import json
|
||||
from abc import ABC
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional, Set, Union
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from fixtures import types
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fixtures.logs import Logs
|
||||
|
||||
|
||||
class JSONPathType(ABC):
|
||||
"""Represents a JSON path with its type information"""
|
||||
path: str
|
||||
type: str
|
||||
last_seen: np.uint64
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
path: str,
|
||||
type: str, # pylint: disable=redefined-builtin
|
||||
last_seen: Optional[datetime.datetime] = None,
|
||||
) -> None:
|
||||
self.path = path
|
||||
self.type = type
|
||||
if last_seen is None:
|
||||
last_seen = datetime.datetime.now()
|
||||
self.last_seen = np.uint64(int(last_seen.timestamp() * 1e9))
|
||||
|
||||
def np_arr(self) -> np.array:
|
||||
"""Return path type data as numpy array for database insertion"""
|
||||
return np.array([self.path, self.type, self.last_seen])
|
||||
|
||||
|
||||
# Constants matching jsontypeexporter
|
||||
ARRAY_SEPARATOR = "[]." # Used in paths like "education[].name"
|
||||
ARRAY_SUFFIX = "[]" # Used when traversing into array element objects
|
||||
|
||||
|
||||
def _infer_array_type_from_type_strings(types: List[str]) -> Optional[str]:
    """
    Infer array type from a list of pre-classified type strings.
    Matches jsontypeexporter's inferArrayMask logic (v0.144.2+).

    Type strings are: "JSON", "String", "Bool", "Float64", "Int64"

    SuperTyping rules (matching Go inferArrayMask):
    - JSON alone → Array(JSON)
    - JSON + any primitive → Array(Dynamic)
    - String alone → Array(Nullable(String)); String + other → Array(Dynamic)
    - Float64 wins over Int64 and Bool
    - Int64 wins over Bool
    - Bool alone → Array(Nullable(Bool))
    """
    if len(types) == 0:
        return None

    unique = set(types)

    has_json = "JSON" in unique
    # has_primitive mirrors Go: (hasJSON && len(unique) > 1) || (!hasJSON && len(unique) > 0)
    has_primitive = (has_json and len(unique) > 1) or (not has_json and len(unique) > 0)

    if has_json:
        if not has_primitive:
            return "Array(JSON)"
        return "Array(Dynamic)"

    # ---- Primitive type resolution (Float > Int > Bool) ----
    if "String" in unique:
        if len(unique) > 1:
            return "Array(Dynamic)"
        return "Array(Nullable(String))"

    if "Float64" in unique:
        return "Array(Nullable(Float64))"
    if "Int64" in unique:
        return "Array(Nullable(Int64))"
    if "Bool" in unique:
        return "Array(Nullable(Bool))"

    return "Array(Dynamic)"
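A quick sanity check of these super-typing rules (illustrative asserts; the expected values simply restate the docstring above):

assert _infer_array_type_from_type_strings(["JSON"]) == "Array(JSON)"
assert _infer_array_type_from_type_strings(["JSON", "Int64"]) == "Array(Dynamic)"
assert _infer_array_type_from_type_strings(["String"]) == "Array(Nullable(String))"
assert _infer_array_type_from_type_strings(["String", "Bool"]) == "Array(Dynamic)"
assert _infer_array_type_from_type_strings(["Float64", "Int64", "Bool"]) == "Array(Nullable(Float64))"
assert _infer_array_type_from_type_strings(["Int64", "Bool"]) == "Array(Nullable(Int64))"
assert _infer_array_type_from_type_strings(["Bool"]) == "Array(Nullable(Bool))"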
def _infer_array_type(elements: List[Any]) -> Optional[str]:
    """
    Infer array type from raw Python list elements.
    Classifies each element then delegates to _infer_array_type_from_type_strings.
    """
    if len(elements) == 0:
        return None

    types = []
    for elem in elements:
        if elem is None:
            continue
        if isinstance(elem, dict):
            types.append("JSON")
        elif isinstance(elem, str):
            types.append("String")
        elif isinstance(elem, bool):  # must be before int (bool is a subclass of int)
            types.append("Bool")
        elif isinstance(elem, float):
            types.append("Float64")
        elif isinstance(elem, int):
            types.append("Int64")

    return _infer_array_type_from_type_strings(types)
def _python_type_to_clickhouse_type(value: Any) -> str:
    """
    Convert a Python value to its ClickHouse JSON data type string.
    """
    if value is None:
        return "String"  # Default for null values

    if isinstance(value, bool):
        return "Bool"
    elif isinstance(value, int):
        return "Int64"
    elif isinstance(value, float):
        return "Float64"
    elif isinstance(value, str):
        return "String"
    elif isinstance(value, list):
        # Use the array type inference above
        array_type = _infer_array_type(value)
        return array_type if array_type else "Array(Dynamic)"
    elif isinstance(value, dict):
        return "JSON"
    else:
        return "String"  # Default fallback
def _extract_json_paths(
    obj: Any,
    current_path: str = "",
    path_types: Optional[Dict[str, Set[str]]] = None,
    level: int = 0,
) -> Dict[str, Set[str]]:
    """
    Recursively extract all paths and their types from a JSON object.
    Matches jsontypeexporter's analyzePValue logic.

    Args:
        obj: The JSON object to traverse
        current_path: Current path being built (e.g., "user.name")
        path_types: Dictionary mapping paths to sets of types found
        level: Current nesting level (for depth limiting)

    Returns:
        Dictionary mapping paths to sets of type strings
    """
    if path_types is None:
        path_types = {}

    if obj is None:
        if current_path:
            if current_path not in path_types:
                path_types[current_path] = set()
            path_types[current_path].add("String")  # Null defaults to String
        return path_types

    if isinstance(obj, dict):
        # For objects, record the object itself and recurse into its keys
        if current_path:
            if current_path not in path_types:
                path_types[current_path] = set()
            path_types[current_path].add("JSON")

        for key, value in obj.items():
            # Build the path for this key
            if current_path:
                new_path = f"{current_path}.{key}"
            else:
                new_path = key

            # Recurse into the value
            _extract_json_paths(value, new_path, path_types, level + 1)

    elif isinstance(obj, list):
        # Skip empty arrays
        if len(obj) == 0:
            return path_types

        # Collect types from array elements (matching Go: types := make([]pcommon.ValueType, 0, s.Len()))
        types = []

        for item in obj:
            if isinstance(item, dict):
                # When traversing into array element objects, use ARRAY_SUFFIX ([]).
                # This matches prefix+ArraySuffix in the Go code.
                # Example: if current_path is "education", we use "education[]" to traverse into objects.
                array_prefix = current_path + ARRAY_SUFFIX if current_path else ""
                for key, value in item.items():
                    if array_prefix:
                        # Use the array separator: education[].name
                        array_path = f"{array_prefix}.{key}"
                    else:
                        array_path = key
                    # Recurse without increasing level (matching Go behavior)
                    _extract_json_paths(value, array_path, path_types, level)
                types.append("JSON")
            elif isinstance(item, list):
                # Arrays inside arrays are not supported - skip the whole path.
                # Matching Go: e.logger.Error("arrays inside arrays are not supported!", ...); return nil
                return path_types
            elif isinstance(item, str):
                types.append("String")
            elif isinstance(item, bool):
                types.append("Bool")
            elif isinstance(item, float):
                types.append("Float64")
            elif isinstance(item, int):
                types.append("Int64")

        # Infer the array type from the collected types (matching Go: if mask := inferArrayMask(types); mask != 0)
        if len(types) > 0:
            array_type = _infer_array_type_from_type_strings(types)
            if array_type and current_path:
                if current_path not in path_types:
                    path_types[current_path] = set()
                path_types[current_path].add(array_type)

    else:
        # Primitive value (string, number, bool)
        if current_path:
            if current_path not in path_types:
                path_types[current_path] = set()
            obj_type = _python_type_to_clickhouse_type(obj)
            path_types[current_path].add(obj_type)

    return path_types
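A small illustration of the traversal (hypothetical input; the commented output follows the rules implemented above):

paths = _extract_json_paths(
    {"user": {"name": "alice", "tags": ["a", "b"]}, "education": [{"year": 2019}]}
)
# {"user": {"JSON"},
#  "user.name": {"String"},
#  "user.tags": {"Array(Nullable(String))"},
#  "education": {"Array(JSON)"},
#  "education[].year": {"Int64"}}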
def _parse_json_bodies_and_extract_paths(
    json_bodies: List[str],
    timestamp: Optional[datetime.datetime] = None,
) -> List[JSONPathType]:
    """
    Parse JSON bodies and extract all paths with their types.
    This mimics the behavior of jsontypeexporter.

    Args:
        json_bodies: List of JSON body strings to parse
        timestamp: Timestamp to use for last_seen (defaults to now)

    Returns:
        List of JSONPathType objects with all discovered paths and types
    """
    if timestamp is None:
        timestamp = datetime.datetime.now()

    # Aggregate all paths and their types across all JSON bodies
    all_path_types: Dict[str, Set[str]] = {}

    for json_body in json_bodies:
        try:
            parsed = json.loads(json_body)
            _extract_json_paths(parsed, "", all_path_types, level=0)
        except (json.JSONDecodeError, TypeError):
            # Skip invalid JSON
            continue

    # Convert to a list of JSONPathType objects.
    # Each path can have multiple types, so we create one JSONPathType per type.
    path_type_objects: List[JSONPathType] = []
    for path, types_set in all_path_types.items():
        for type_str in types_set:
            path_type_objects.append(
                JSONPathType(path=path, type=type_str, last_seen=timestamp)
            )

    return path_type_objects
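Fed two bodies that disagree on a field's type, the aggregation emits one row per (path, type) pair — which is how ambiguities like "user.age": {Int64, String} in the Go type set arise (illustrative):

rows = _parse_json_bodies_and_extract_paths(
    ['{"user": {"age": 25}}', '{"user": {"age": "n/a"}}']
)
sorted((r.path, r.type) for r in rows)
# [("user", "JSON"), ("user.age", "Int64"), ("user.age", "String")]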
@pytest.fixture(name="export_json_types", scope="function")
|
||||
def export_json_types(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
request: pytest.FixtureRequest, # To access migrator fixture
|
||||
) -> Generator[Callable[[Union[List[JSONPathType], List[str], List[Any]]], None], Any, None]:
|
||||
"""
|
||||
Fixture for exporting JSON type metadata to the path_types table.
|
||||
This is a simpler version of jsontypeexporter for test fixtures.
|
||||
|
||||
The function can accept:
|
||||
1. List of JSONPathType objects (manual specification)
|
||||
2. List of JSON body strings (auto-extract paths)
|
||||
3. List of Logs objects (extract from body_json field)
|
||||
|
||||
Usage examples:
|
||||
# Manual specification
|
||||
export_json_types([
|
||||
JSONPathType(path="user.name", type="String"),
|
||||
JSONPathType(path="user.age", type="Int64"),
|
||||
])
|
||||
|
||||
# Auto-extract from JSON strings
|
||||
export_json_types([
|
||||
'{"user": {"name": "alice", "age": 25}}',
|
||||
'{"user": {"name": "bob", "age": 30}}',
|
||||
])
|
||||
|
||||
# Auto-extract from Logs objects
|
||||
export_json_types(logs_list)
|
||||
"""
|
||||
# Ensure migrator has run to create the table
|
||||
try:
|
||||
request.getfixturevalue("migrator")
|
||||
except Exception:
|
||||
# If migrator fixture is not available, that's okay - table might already exist
|
||||
pass
|
||||
|
||||
def _export_json_types(
|
||||
data: Union[List[JSONPathType], List[str], List[Any]], # List[Logs] but avoiding circular import
|
||||
) -> None:
|
||||
"""
|
||||
Export JSON type metadata to signoz_metadata.distributed_json_path_types table.
|
||||
This table stores path and type information for body JSON fields.
|
||||
"""
|
||||
path_types: List[JSONPathType] = []
|
||||
|
||||
if len(data) == 0:
|
||||
return
|
||||
|
||||
# Determine input type and convert to JSONPathType list
|
||||
first_item = data[0]
|
||||
|
||||
if isinstance(first_item, JSONPathType):
|
||||
# Already JSONPathType objects
|
||||
path_types = data # type: ignore
|
||||
elif isinstance(first_item, str):
|
||||
# List of JSON strings - parse and extract paths
|
||||
path_types = _parse_json_bodies_and_extract_paths(data) # type: ignore
|
||||
else:
|
||||
# Assume it's a list of Logs objects - extract body_v2
|
||||
json_bodies: List[str] = []
|
||||
for log in data: # type: ignore
|
||||
# Try to get body_v2 attribute
|
||||
if hasattr(log, "body_v2") and log.body_v2:
|
||||
json_bodies.append(log.body_v2)
|
||||
elif hasattr(log, "body") and log.body:
|
||||
# Fallback to body if body_v2 not available
|
||||
try:
|
||||
# Try to parse as JSON
|
||||
json.loads(log.body)
|
||||
json_bodies.append(log.body)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
|
||||
if json_bodies:
|
||||
path_types = _parse_json_bodies_and_extract_paths(json_bodies)
|
||||
|
||||
if len(path_types) == 0:
|
||||
return
|
||||
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metadata",
|
||||
table="distributed_json_path_types",
|
||||
data=[path_type.np_arr() for path_type in path_types],
|
||||
column_names=[
|
||||
"path",
|
||||
"type",
|
||||
"last_seen",
|
||||
],
|
||||
)
|
||||
|
||||
yield _export_json_types
|
||||
|
||||
# Cleanup - truncate the local table after tests (following pattern from logs fixture)
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_metadata.json_path_types ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
|
||||
|
||||
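In a test this fixture would typically pair with log insertion, roughly like the sketch below (hypothetical test body; it assumes insert_logs is exposed as a fixture and that Logs can be constructed with just a body, as the later hunks in this diff suggest):

def test_json_body_paths(insert_logs, export_json_types) -> None:
    logs = [Logs(body='{"user": {"name": "alice"}}')]
    insert_logs(logs)        # writes the body/body_v2/body_promoted columns
    export_json_types(logs)  # derives path_types rows from each log's body_v2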
@pytest.fixture(name="export_promoted_paths", scope="function")
|
||||
def export_promoted_paths(
|
||||
clickhouse: types.TestContainerClickhouse,
|
||||
request: pytest.FixtureRequest, # To access migrator fixture
|
||||
) -> Generator[Callable[[List[str]], None], Any, None]:
|
||||
"""
|
||||
Fixture for exporting promoted JSON paths to the promoted paths table.
|
||||
"""
|
||||
# Ensure migrator has run to create the table
|
||||
try:
|
||||
request.getfixturevalue("migrator")
|
||||
except Exception:
|
||||
# If migrator fixture is not available, that's okay - table might already exist
|
||||
pass
|
||||
|
||||
def _export_promoted_paths(paths: List[str]) -> None:
|
||||
if len(paths) == 0:
|
||||
return
|
||||
|
||||
now_ms = int(datetime.datetime.now().timestamp() * 1000)
|
||||
rows = [(path, now_ms) for path in paths]
|
||||
clickhouse.conn.insert(
|
||||
database="signoz_metadata",
|
||||
table="distributed_json_promoted_paths",
|
||||
data=rows,
|
||||
column_names=[
|
||||
"path",
|
||||
"created_at",
|
||||
],
|
||||
)
|
||||
|
||||
yield _export_promoted_paths
|
||||
|
||||
clickhouse.conn.query(
|
||||
f"TRUNCATE TABLE signoz_metadata.json_promoted_paths ON CLUSTER '{clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER']}' SYNC"
|
||||
)
|
||||
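Usage is a single call with the paths to promote (illustrative; the paths echo the indexed entries above):

export_promoted_paths(["user.name", "response.time-taken"])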
@@ -122,6 +122,8 @@ class Logs(ABC):
        resources: dict[str, Any] = {},
        attributes: dict[str, Any] = {},
        body: str = "default body",
        body_v2: Optional[str] = None,
        body_promoted: Optional[str] = None,
        severity_text: str = "INFO",
        trace_id: str = "",
        span_id: str = "",
@@ -167,6 +169,33 @@ class Logs(ABC):
        # Set body
        self.body = body

        # Set body_v2 - if body is valid JSON use it as-is, otherwise wrap it as {"message": body}.
        # ClickHouse accepts String input for a JSON column.
        if body_v2 is not None:
            self.body_v2 = body_v2
        else:
            # Try to parse body as JSON; if successful use it directly,
            # otherwise wrap as {"message": body}, matching the normalize operator behavior.
            try:
                json.loads(body)
                self.body_v2 = body
            except (json.JSONDecodeError, TypeError):
                self.body_v2 = json.dumps({"message": body})

        # Set body_promoted - must be valid JSON.
        # Tests will explicitly pass the promoted column's content, but we validate it.
        if body_promoted is not None:
            # Validate that it's valid JSON
            try:
                json.loads(body_promoted)
                self.body_promoted = body_promoted
            except (json.JSONDecodeError, TypeError):
                # If invalid, default to an empty JSON object
                self.body_promoted = "{}"
        else:
            # Default to an empty JSON object (valid JSON)
            self.body_promoted = "{}"

        # Process resources and attributes
        self.resources_string = {k: str(v) for k, v in resources.items()}
        self.resource_json = (
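The fallback then behaves like this (illustrative, assuming the remaining constructor defaults):

Logs(body='{"a": 1}').body_v2    # '{"a": 1}' - already valid JSON, kept as-is
Logs(body="plain text").body_v2  # '{"message": "plain text"}' - wrapped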
@@ -326,6 +355,8 @@ class Logs(ABC):
            self.severity_text,
            self.severity_number,
            self.body,
            self.body_v2,
            self.body_promoted,
            self.attributes_string,
            self.attributes_number,
            self.attributes_bool,
@@ -470,6 +501,8 @@ def insert_logs(
        "severity_text",
        "severity_number",
        "body",
        "body_v2",
        "body_promoted",
        "attributes_string",
        "attributes_number",
        "attributes_bool",
@@ -1,3 +1,5 @@
from typing import Optional

import docker
import pytest
from testcontainers.core.container import Network
@@ -8,27 +10,32 @@ from fixtures.logger import setup_logger
logger = setup_logger(__name__)


@pytest.fixture(name="migrator", scope="package")
def migrator(
def create_migrator(
    network: Network,
    clickhouse: types.TestContainerClickhouse,
    request: pytest.FixtureRequest,
    pytestconfig: pytest.Config,
    cache_key: str = "migrator",
    env_overrides: Optional[dict] = None,
) -> types.Operation:
    """
    Package-scoped fixture for running schema migrations.
    Factory function for running schema migrations.
    Accepts optional env_overrides to customize the migrator environment.
    """

    def create() -> None:
        version = request.config.getoption("--schema-migrator-version")
        client = docker.from_env()

        environment = dict(env_overrides) if env_overrides else {}

        container = client.containers.run(
            image=f"signoz/signoz-schema-migrator:{version}",
            command=f"sync --replication=true --cluster-name=cluster --up= --dsn={clickhouse.env['SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN']}",
            detach=True,
            auto_remove=False,
            network=network.id,
            environment=environment,
        )

        result = container.wait()
@@ -47,6 +54,7 @@ def migrator(
            detach=True,
            auto_remove=False,
            network=network.id,
            environment=environment,
        )

        result = container.wait()
@@ -59,7 +67,7 @@ def migrator(

        container.remove()

        return types.Operation(name="migrator")
        return types.Operation(name=cache_key)

    def delete(_: types.Operation) -> None:
        pass
@@ -70,9 +78,27 @@ def migrator(
    return dev.wrap(
        request,
        pytestconfig,
        "migrator",
        cache_key,
        lambda: types.Operation(name=""),
        create,
        delete,
        restore,
    )


@pytest.fixture(name="migrator", scope="package")
def migrator(
    network: Network,
    clickhouse: types.TestContainerClickhouse,
    request: pytest.FixtureRequest,
    pytestconfig: pytest.Config,
) -> types.Operation:
    """
    Package-scoped fixture for running schema migrations.
    """
    return create_migrator(
        network=network,
        clickhouse=clickhouse,
        request=request,
        pytestconfig=pytestconfig,
    )
1143 tests/integration/src/querier_json_body/01_logs_json_body_new_qb.py (Normal file)
(File diff suppressed because it is too large)
0 tests/integration/src/querier_json_body/__init__.py (Normal file)
70 tests/integration/src/querier_json_body/conftest.py (Normal file)
@@ -0,0 +1,70 @@
import pytest
from testcontainers.core.container import Network

from fixtures import types
from fixtures.migrator import create_migrator
from fixtures.signoz import create_signoz

UNSUPPORTED_CLICKHOUSE_VERSIONS = {"25.5.6"}


def pytest_collection_modifyitems(
    config: pytest.Config, items: list[pytest.Item]
) -> None:
    version = config.getoption("--clickhouse-version")
    if version in UNSUPPORTED_CLICKHOUSE_VERSIONS:
        skip = pytest.mark.skip(
            reason=f"JSON body QB tests are not supported on ClickHouse {version}"
        )
        for item in items:
            item.add_marker(skip)


@pytest.fixture(name="migrator", scope="package")
def migrator_json(
    network: Network,
    clickhouse: types.TestContainerClickhouse,
    request: pytest.FixtureRequest,
    pytestconfig: pytest.Config,
) -> types.Operation:
    """
    Package-scoped migrator with ENABLE_LOGS_MIGRATIONS_V2=1.
    """
    return create_migrator(
        network=network,
        clickhouse=clickhouse,
        request=request,
        pytestconfig=pytestconfig,
        cache_key="migrator-json-body",
        env_overrides={
            "ENABLE_LOGS_MIGRATIONS_V2": "1",
        },
    )


@pytest.fixture(name="signoz", scope="package")
def signoz_json_body(
    network: Network,
    zeus: types.TestContainerDocker,
    gateway: types.TestContainerDocker,
    sqlstore: types.TestContainerSQL,
    clickhouse: types.TestContainerClickhouse,
    request: pytest.FixtureRequest,
    pytestconfig: pytest.Config,
) -> types.SigNoz:
    """
    Package-scoped fixture for SigNoz with BODY_JSON_QUERY_ENABLED=true.
    """
    return create_signoz(
        network=network,
        zeus=zeus,
        gateway=gateway,
        sqlstore=sqlstore,
        clickhouse=clickhouse,
        request=request,
        pytestconfig=pytestconfig,
        cache_key="signoz-json-body",
        env_overrides={
            "BODY_JSON_QUERY_ENABLED": "true",
        },
    )