Compare commits


4 Commits

Author SHA1 Message Date
swapnil-signoz
79c152bc65 feat: adding types changes and openapi spec 2026-04-07 18:11:59 +05:30
Abhi kumar
7c1fe82043 refactor(tooltip): pin active series to header and fix spacing in uPlot chart tooltip (#10862)
* fix: fixed tooltip spacing and minor ui fixes

* chore: minor fix

* chore: changed opacity

* chore: broke down tooltip component

* chore: sorted the tooltip content

* chore: moved scss to modules

* chore: pr review comments

* chore: removed sorting values

* chore: pr review comments

* rename classname

Co-authored-by: Ashwin Bhatkal <ashwin96@gmail.com>

* chore: removed tests

---------

Co-authored-by: Ashwin Bhatkal <ashwin96@gmail.com>
2026-04-07 11:49:26 +00:00
Vinicius Lourenço
d4dbbceab7 fix(app-routes): use redirect component to avoid flashing old routes due to useEffect (#10802)
2026-04-07 11:10:48 +00:00
Vikrant Gupta
918cf4dfe5 fix(authz): improve error messages (#10856) 2026-04-07 08:34:53 +00:00
48 changed files with 1888 additions and 3006 deletions

View File

@@ -405,11 +405,11 @@ components:
type: object
CloudintegrationtypesAWSCollectionStrategy:
properties:
aws_logs:
logs:
$ref: '#/components/schemas/CloudintegrationtypesAWSLogsStrategy'
aws_metrics:
metrics:
$ref: '#/components/schemas/CloudintegrationtypesAWSMetricsStrategy'
s3_buckets:
s3Buckets:
additionalProperties:
items:
type: string
@@ -449,12 +449,12 @@ components:
type: object
CloudintegrationtypesAWSLogsStrategy:
properties:
cloudwatch_logs_subscriptions:
cloudwatchLogsSubscriptions:
items:
properties:
filter_pattern:
filterPattern:
type: string
log_group_name_prefix:
logGroupNamePrefix:
type: string
type: object
nullable: true
@@ -462,7 +462,7 @@ components:
type: object
CloudintegrationtypesAWSMetricsStrategy:
properties:
cloudwatch_metric_stream_filters:
cloudwatchMetricStreamFilters:
items:
properties:
MetricNames:
@@ -486,7 +486,7 @@ components:
properties:
enabled:
type: boolean
s3_buckets:
s3Buckets:
additionalProperties:
items:
type: string
@@ -561,6 +561,26 @@ components:
nullable: true
type: array
type: object
CloudintegrationtypesCloudIntegrationService:
nullable: true
properties:
cloudIntegrationId:
type: string
config:
$ref: '#/components/schemas/CloudintegrationtypesServiceConfig'
createdAt:
format: date-time
type: string
id:
type: string
type:
$ref: '#/components/schemas/CloudintegrationtypesServiceID'
updatedAt:
format: date-time
type: string
required:
- id
type: object
CloudintegrationtypesCollectedLogAttribute:
properties:
name:
@@ -596,6 +616,16 @@ components:
- aws
type: object
CloudintegrationtypesConnectionArtifactRequest:
properties:
config:
$ref: '#/components/schemas/CloudintegrationtypesConnectionArtifactRequestConfig'
credentials:
$ref: '#/components/schemas/CloudintegrationtypesSignozCredentials'
required:
- config
- credentials
type: object
CloudintegrationtypesConnectionArtifactRequestConfig:
properties:
aws:
$ref: '#/components/schemas/CloudintegrationtypesAWSConnectionArtifactRequest'
@@ -694,11 +724,54 @@ components:
type: string
type: array
telemetry:
$ref: '#/components/schemas/CloudintegrationtypesAWSCollectionStrategy'
$ref: '#/components/schemas/CloudintegrationtypesOldAWSCollectionStrategy'
required:
- enabled_regions
- telemetry
type: object
CloudintegrationtypesOldAWSCollectionStrategy:
properties:
aws_logs:
$ref: '#/components/schemas/CloudintegrationtypesOldAWSLogsStrategy'
aws_metrics:
$ref: '#/components/schemas/CloudintegrationtypesOldAWSMetricsStrategy'
provider:
type: string
s3_buckets:
additionalProperties:
items:
type: string
type: array
type: object
type: object
CloudintegrationtypesOldAWSLogsStrategy:
properties:
cloudwatch_logs_subscriptions:
items:
properties:
filter_pattern:
type: string
log_group_name_prefix:
type: string
type: object
nullable: true
type: array
type: object
CloudintegrationtypesOldAWSMetricsStrategy:
properties:
cloudwatch_metric_stream_filters:
items:
properties:
MetricNames:
items:
type: string
type: array
Namespace:
type: string
type: object
nullable: true
type: array
type: object
CloudintegrationtypesPostableAgentCheckInRequest:
properties:
account_id:
@@ -727,6 +800,8 @@ components:
properties:
assets:
$ref: '#/components/schemas/CloudintegrationtypesAssets'
cloudIntegrationService:
$ref: '#/components/schemas/CloudintegrationtypesCloudIntegrationService'
dataCollected:
$ref: '#/components/schemas/CloudintegrationtypesDataCollected'
icon:
@@ -735,9 +810,7 @@ components:
type: string
overview:
type: string
serviceConfig:
$ref: '#/components/schemas/CloudintegrationtypesServiceConfig'
supported_signals:
supportedSignals:
$ref: '#/components/schemas/CloudintegrationtypesSupportedSignals'
telemetryCollectionStrategy:
$ref: '#/components/schemas/CloudintegrationtypesCollectionStrategy'
@@ -749,9 +822,10 @@ components:
- icon
- overview
- assets
- supported_signals
- supportedSignals
- dataCollected
- telemetryCollectionStrategy
- cloudIntegrationService
type: object
CloudintegrationtypesServiceConfig:
properties:
@@ -760,6 +834,22 @@ components:
required:
- aws
type: object
CloudintegrationtypesServiceID:
enum:
- alb
- api-gateway
- dynamodb
- ec2
- ecs
- eks
- elasticache
- lambda
- msk
- rds
- s3sync
- sns
- sqs
type: string
CloudintegrationtypesServiceMetadata:
properties:
enabled:
@@ -776,6 +866,22 @@ components:
- icon
- enabled
type: object
CloudintegrationtypesSignozCredentials:
properties:
ingestionKey:
type: string
ingestionUrl:
type: string
sigNozApiKey:
type: string
sigNozApiURL:
type: string
required:
- sigNozApiURL
- sigNozApiKey
- ingestionUrl
- ingestionKey
type: object
CloudintegrationtypesSupportedSignals:
properties:
logs:
@@ -1098,78 +1204,6 @@ components:
enabled:
type: boolean
type: object
InframonitoringtypesHostRecord:
properties:
cpu:
format: double
type: number
diskUsage:
format: double
type: number
hostName:
type: string
load15:
format: double
type: number
memory:
format: double
type: number
meta:
additionalProperties: {}
nullable: true
type: object
status:
type: string
wait:
format: double
type: number
type: object
InframonitoringtypesHostStatus:
enum:
- active
- inactive
- ""
type: string
InframonitoringtypesHostsListRequest:
properties:
end:
format: int64
type: integer
filter:
$ref: '#/components/schemas/Querybuildertypesv5Filter'
filterByStatus:
$ref: '#/components/schemas/InframonitoringtypesHostStatus'
groupBy:
items:
$ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
nullable: true
type: array
limit:
type: integer
offset:
type: integer
orderBy:
$ref: '#/components/schemas/Querybuildertypesv5OrderBy'
start:
format: int64
type: integer
type: object
InframonitoringtypesHostsListResponse:
properties:
endTimeBeforeRetention:
type: boolean
records:
items:
$ref: '#/components/schemas/InframonitoringtypesHostRecord'
nullable: true
type: array
sentAnyMetricsData:
type: boolean
total:
type: integer
type:
type: string
type: object
MetricsexplorertypesInspectMetricsRequest:
properties:
end:
@@ -3466,6 +3500,61 @@ paths:
summary: Update account
tags:
- cloudintegration
/api/v1/cloud_integrations/{cloud_provider}/accounts/{id}/services/{service_id}:
put:
deprecated: false
description: This endpoint updates a service for the specified cloud provider
operationId: UpdateService
parameters:
- in: path
name: cloud_provider
required: true
schema:
type: string
- in: path
name: id
required: true
schema:
type: string
- in: path
name: service_id
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/CloudintegrationtypesUpdatableService'
responses:
"204":
description: No Content
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- ADMIN
- tokenizer:
- ADMIN
summary: Update service
tags:
- cloudintegration
/api/v1/cloud_integrations/{cloud_provider}/accounts/check_in:
post:
deprecated: false
@@ -3523,6 +3612,59 @@ paths:
summary: Agent check-in
tags:
- cloudintegration
/api/v1/cloud_integrations/{cloud_provider}/credentials:
get:
deprecated: false
description: This endpoint retrieves the connection credentials required for
integration
operationId: GetConnectionCredentials
parameters:
- in: path
name: cloud_provider
required: true
schema:
type: string
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/CloudintegrationtypesSignozCredentials'
status:
type: string
required:
- status
- data
type: object
description: OK
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- ADMIN
- tokenizer:
- ADMIN
summary: Get connection credentials
tags:
- cloudintegration
/api/v1/cloud_integrations/{cloud_provider}/services:
get:
deprecated: false
@@ -3633,55 +3775,6 @@ paths:
summary: Get service
tags:
- cloudintegration
put:
deprecated: false
description: This endpoint updates a service for the specified cloud provider
operationId: UpdateService
parameters:
- in: path
name: cloud_provider
required: true
schema:
type: string
- in: path
name: service_id
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/CloudintegrationtypesUpdatableService'
responses:
"204":
description: No Content
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- ADMIN
- tokenizer:
- ADMIN
summary: Update service
tags:
- cloudintegration
/api/v1/complete/google:
get:
deprecated: false
@@ -7252,64 +7345,6 @@ paths:
summary: Health check
tags:
- health
/api/v2/infra-monitoring/hosts/list:
post:
deprecated: false
description: This endpoint returns a list of hosts along with other information
for each of them
operationId: HostsList
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/InframonitoringtypesHostsListRequest'
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/InframonitoringtypesHostsListResponse'
status:
type: string
required:
- status
- data
type: object
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: List Hosts for Infra Monitoring
tags:
- infra-monitoring
/api/v2/livez:
get:
deprecated: false
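The notable route change in this spec: UpdateService moves from the provider-scoped `/services/{service_id}` path to the account-scoped `/accounts/{id}/services/{service_id}` path, and a new `GET .../credentials` endpoint exposes the SigNoz connection credentials. A minimal sketch of calling the relocated endpoint directly (illustrative only; `BASE_URL` and the `SIGNOZ-API-KEY` header name are assumptions — the spec names the security schemes `api_key`/`tokenizer` but not their wire format):

```typescript
// Sketch of a raw call against the relocated UpdateService route.
// BASE_URL and the SIGNOZ-API-KEY header are placeholders, not spec content.
const BASE_URL = 'https://signoz.example.com';

async function updateServiceRaw(
  cloudProvider: string,
  accountId: string,
  serviceId: string,
  body: unknown, // CloudintegrationtypesUpdatableService payload
  apiKey: string,
): Promise<void> {
  const res = await fetch(
    `${BASE_URL}/api/v1/cloud_integrations/${cloudProvider}/accounts/${accountId}/services/${serviceId}`,
    {
      method: 'PUT',
      headers: {
        'Content-Type': 'application/json',
        'SIGNOZ-API-KEY': apiKey,
      },
      body: JSON.stringify(body),
    },
  );
  // Per the spec, success is 204 No Content; 401/403/500 carry a
  // RenderErrorResponse body.
  if (res.status !== 204) {
    throw new Error(`UpdateService failed with status ${res.status}`);
  }
}
```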

View File

@@ -1,4 +1,4 @@
import { ReactChild, useCallback, useEffect, useMemo, useState } from 'react';
import { ReactChild, useCallback, useMemo } from 'react';
import { matchPath, Redirect, useLocation } from 'react-router-dom';
import getLocalStorageApi from 'api/browser/localstorage/get';
import setLocalStorageApi from 'api/browser/localstorage/set';
@@ -8,12 +8,10 @@ import { LOCALSTORAGE } from 'constants/localStorage';
import { ORG_PREFERENCES } from 'constants/orgPreferences';
import ROUTES from 'constants/routes';
import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
import history from 'lib/history';
import { isEmpty } from 'lodash-es';
import { useAppContext } from 'providers/App/App';
import { LicensePlatform, LicenseState } from 'types/api/licensesV3/getActive';
import { OrgPreference } from 'types/api/preferences/preference';
import { Organization } from 'types/api/user/getOrganization';
import { USER_ROLES } from 'types/roles';
import { routePermission } from 'utils/permission';
@@ -25,6 +23,7 @@ import routes, {
SUPPORT_ROUTE,
} from './routes';
// eslint-disable-next-line sonarjs/cognitive-complexity
function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const location = useLocation();
const { pathname } = location;
@@ -57,7 +56,12 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
const currentRoute = mapRoutes.get('current');
const { isCloudUser: isCloudUserVal } = useGetTenantLicense();
const [orgData, setOrgData] = useState<Organization | undefined>(undefined);
const orgData = useMemo(() => {
if (org && org.length > 0 && org[0].id !== undefined) {
return org[0];
}
return undefined;
}, [org]);
const { data: usersData, isFetching: isFetchingUsers } = useListUsers({
query: {
@@ -75,214 +79,7 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
return remainingUsers.length === 1;
}, [usersData?.data]);
useEffect(() => {
if (
isCloudUserVal &&
!isFetchingOrgPreferences &&
orgPreferences &&
!isFetchingUsers &&
usersData &&
usersData.data
) {
const isOnboardingComplete = orgPreferences?.find(
(preference: OrgPreference) =>
preference.name === ORG_PREFERENCES.ORG_ONBOARDING,
)?.value;
// Don't redirect to onboarding if workspace has issues (blocked, suspended, or restricted)
// User needs access to settings/billing to fix payment issues
const isWorkspaceBlocked = trialInfo?.workSpaceBlock;
const isWorkspaceSuspended = activeLicense?.state === LicenseState.DEFAULTED;
const isWorkspaceAccessRestricted =
activeLicense?.state === LicenseState.TERMINATED ||
activeLicense?.state === LicenseState.EXPIRED ||
activeLicense?.state === LicenseState.CANCELLED;
const hasWorkspaceIssue =
isWorkspaceBlocked || isWorkspaceSuspended || isWorkspaceAccessRestricted;
if (hasWorkspaceIssue) {
return;
}
const isFirstUser = checkFirstTimeUser();
if (
isFirstUser &&
!isOnboardingComplete &&
// if the current route is allowed to be overriden by org onboarding then only do the same
!ROUTES_NOT_TO_BE_OVERRIDEN.includes(pathname)
) {
history.push(ROUTES.ONBOARDING);
}
}
}, [
checkFirstTimeUser,
isCloudUserVal,
isFetchingOrgPreferences,
isFetchingUsers,
orgPreferences,
usersData,
pathname,
trialInfo?.workSpaceBlock,
activeLicense?.state,
]);
const navigateToWorkSpaceBlocked = useCallback((): void => {
const isRouteEnabledForWorkspaceBlockedState =
isAdmin &&
(pathname === ROUTES.SETTINGS ||
pathname === ROUTES.ORG_SETTINGS ||
pathname === ROUTES.MEMBERS_SETTINGS ||
pathname === ROUTES.BILLING ||
pathname === ROUTES.MY_SETTINGS);
if (
pathname &&
pathname !== ROUTES.WORKSPACE_LOCKED &&
!isRouteEnabledForWorkspaceBlockedState
) {
history.push(ROUTES.WORKSPACE_LOCKED);
}
}, [isAdmin, pathname]);
const navigateToWorkSpaceAccessRestricted = useCallback((): void => {
if (pathname && pathname !== ROUTES.WORKSPACE_ACCESS_RESTRICTED) {
history.push(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
}
}, [pathname]);
useEffect(() => {
if (!isFetchingActiveLicense && activeLicense) {
const isTerminated = activeLicense.state === LicenseState.TERMINATED;
const isExpired = activeLicense.state === LicenseState.EXPIRED;
const isCancelled = activeLicense.state === LicenseState.CANCELLED;
const isWorkspaceAccessRestricted = isTerminated || isExpired || isCancelled;
const { platform } = activeLicense;
if (isWorkspaceAccessRestricted && platform === LicensePlatform.CLOUD) {
navigateToWorkSpaceAccessRestricted();
}
}
}, [
isFetchingActiveLicense,
activeLicense,
navigateToWorkSpaceAccessRestricted,
]);
useEffect(() => {
if (!isFetchingActiveLicense) {
const shouldBlockWorkspace = trialInfo?.workSpaceBlock;
if (
shouldBlockWorkspace &&
activeLicense?.platform === LicensePlatform.CLOUD
) {
navigateToWorkSpaceBlocked();
}
}
}, [
isFetchingActiveLicense,
trialInfo?.workSpaceBlock,
activeLicense?.platform,
navigateToWorkSpaceBlocked,
]);
const navigateToWorkSpaceSuspended = useCallback((): void => {
if (pathname && pathname !== ROUTES.WORKSPACE_SUSPENDED) {
history.push(ROUTES.WORKSPACE_SUSPENDED);
}
}, [pathname]);
useEffect(() => {
if (!isFetchingActiveLicense && activeLicense) {
const shouldSuspendWorkspace =
activeLicense.state === LicenseState.DEFAULTED;
if (
shouldSuspendWorkspace &&
activeLicense.platform === LicensePlatform.CLOUD
) {
navigateToWorkSpaceSuspended();
}
}
}, [isFetchingActiveLicense, activeLicense, navigateToWorkSpaceSuspended]);
useEffect(() => {
if (org && org.length > 0 && org[0].id !== undefined) {
setOrgData(org[0]);
}
}, [org]);
// if the feature flag is enabled and the current route is /get-started then redirect to /get-started-with-signoz-cloud
useEffect(() => {
if (
currentRoute?.path === ROUTES.GET_STARTED &&
featureFlags?.find((e) => e.name === FeatureKeys.ONBOARDING_V3)?.active
) {
history.push(ROUTES.GET_STARTED_WITH_CLOUD);
}
}, [currentRoute, featureFlags]);
// eslint-disable-next-line sonarjs/cognitive-complexity
useEffect(() => {
// if it is an old route navigate to the new route
if (isOldRoute) {
// this will be handled by the redirect component below
return;
}
// if the current route is public dashboard then don't redirect to login
const isPublicDashboard = currentRoute?.path === ROUTES.PUBLIC_DASHBOARD;
if (isPublicDashboard) {
return;
}
// if the current route
if (currentRoute) {
const { isPrivate, key } = currentRoute;
if (isPrivate) {
if (isLoggedInState) {
const route = routePermission[key];
if (route && route.find((e) => e === user.role) === undefined) {
history.push(ROUTES.UN_AUTHORIZED);
}
} else {
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
history.push(ROUTES.LOGIN);
}
} else if (isLoggedInState) {
const fromPathname = getLocalStorageApi(
LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
);
if (fromPathname) {
history.push(fromPathname);
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
} else if (pathname !== ROUTES.SOMETHING_WENT_WRONG) {
history.push(ROUTES.HOME);
}
} else {
// do nothing as the unauthenticated routes are LOGIN and SIGNUP and the LOGIN container takes care of routing to signup if
// setup is not completed
}
} else if (isLoggedInState) {
const fromPathname = getLocalStorageApi(
LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
);
if (fromPathname) {
history.push(fromPathname);
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
} else {
history.push(ROUTES.HOME);
}
} else {
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
history.push(ROUTES.LOGIN);
}
}, [isLoggedInState, pathname, user, isOldRoute, currentRoute, location]);
// Handle old routes - redirect to new routes
if (isOldRoute) {
const redirectUrl = oldNewRoutesMapping[pathname];
return (
@@ -296,7 +93,143 @@ function PrivateRoute({ children }: PrivateRouteProps): JSX.Element {
);
}
// NOTE: disabling this rule as there is no need to have div
// Public dashboard - no redirect needed
const isPublicDashboard = currentRoute?.path === ROUTES.PUBLIC_DASHBOARD;
if (isPublicDashboard) {
return <>{children}</>;
}
// Check for workspace access restriction (cloud only)
const isCloudPlatform = activeLicense?.platform === LicensePlatform.CLOUD;
if (!isFetchingActiveLicense && activeLicense && isCloudPlatform) {
const isTerminated = activeLicense.state === LicenseState.TERMINATED;
const isExpired = activeLicense.state === LicenseState.EXPIRED;
const isCancelled = activeLicense.state === LicenseState.CANCELLED;
const isWorkspaceAccessRestricted = isTerminated || isExpired || isCancelled;
if (
isWorkspaceAccessRestricted &&
pathname !== ROUTES.WORKSPACE_ACCESS_RESTRICTED
) {
return <Redirect to={ROUTES.WORKSPACE_ACCESS_RESTRICTED} />;
}
// Check for workspace suspended (DEFAULTED)
const shouldSuspendWorkspace = activeLicense.state === LicenseState.DEFAULTED;
if (shouldSuspendWorkspace && pathname !== ROUTES.WORKSPACE_SUSPENDED) {
return <Redirect to={ROUTES.WORKSPACE_SUSPENDED} />;
}
}
// Check for workspace blocked (trial expired)
if (!isFetchingActiveLicense && isCloudPlatform && trialInfo?.workSpaceBlock) {
const isRouteEnabledForWorkspaceBlockedState =
isAdmin &&
(pathname === ROUTES.SETTINGS ||
pathname === ROUTES.ORG_SETTINGS ||
pathname === ROUTES.MEMBERS_SETTINGS ||
pathname === ROUTES.BILLING ||
pathname === ROUTES.MY_SETTINGS);
if (
pathname !== ROUTES.WORKSPACE_LOCKED &&
!isRouteEnabledForWorkspaceBlockedState
) {
return <Redirect to={ROUTES.WORKSPACE_LOCKED} />;
}
}
// Check for onboarding redirect (cloud users, first user, onboarding not complete)
if (
isCloudUserVal &&
!isFetchingOrgPreferences &&
orgPreferences &&
!isFetchingUsers &&
usersData &&
usersData.data
) {
const isOnboardingComplete = orgPreferences?.find(
(preference: OrgPreference) =>
preference.name === ORG_PREFERENCES.ORG_ONBOARDING,
)?.value;
// Don't redirect to onboarding if workspace has issues
const isWorkspaceBlocked = trialInfo?.workSpaceBlock;
const isWorkspaceSuspended = activeLicense?.state === LicenseState.DEFAULTED;
const isWorkspaceAccessRestricted =
activeLicense?.state === LicenseState.TERMINATED ||
activeLicense?.state === LicenseState.EXPIRED ||
activeLicense?.state === LicenseState.CANCELLED;
const hasWorkspaceIssue =
isWorkspaceBlocked || isWorkspaceSuspended || isWorkspaceAccessRestricted;
if (!hasWorkspaceIssue) {
const isFirstUser = checkFirstTimeUser();
if (
isFirstUser &&
!isOnboardingComplete &&
!ROUTES_NOT_TO_BE_OVERRIDEN.includes(pathname) &&
pathname !== ROUTES.ONBOARDING
) {
return <Redirect to={ROUTES.ONBOARDING} />;
}
}
}
// Check for GET_STARTED → GET_STARTED_WITH_CLOUD redirect (feature flag)
if (
currentRoute?.path === ROUTES.GET_STARTED &&
featureFlags?.find((e) => e.name === FeatureKeys.ONBOARDING_V3)?.active
) {
return <Redirect to={ROUTES.GET_STARTED_WITH_CLOUD} />;
}
// Main routing logic
if (currentRoute) {
const { isPrivate, key } = currentRoute;
if (isPrivate) {
if (isLoggedInState) {
const route = routePermission[key];
if (route && route.find((e) => e === user.role) === undefined) {
return <Redirect to={ROUTES.UN_AUTHORIZED} />;
}
} else {
// Save current path and redirect to login
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
return <Redirect to={ROUTES.LOGIN} />;
}
} else if (isLoggedInState) {
// Non-private route, but user is logged in
const fromPathname = getLocalStorageApi(
LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
);
if (fromPathname) {
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
return <Redirect to={fromPathname} />;
}
if (pathname !== ROUTES.SOMETHING_WENT_WRONG) {
return <Redirect to={ROUTES.HOME} />;
}
}
// Non-private route, user not logged in - let login/signup pages handle it
} else if (isLoggedInState) {
// Unknown route, logged in
const fromPathname = getLocalStorageApi(
LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT,
);
if (fromPathname) {
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, '');
return <Redirect to={fromPathname} />;
}
return <Redirect to={ROUTES.HOME} />;
} else {
// Unknown route, not logged in
setLocalStorageApi(LOCALSTORAGE.UNAUTHENTICATED_ROUTE_HIT, pathname);
return <Redirect to={ROUTES.LOGIN} />;
}
return <>{children}</>;
}
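The core of this change: navigation side effects that previously ran in `useEffect` via `history.push` now happen during render by returning a `<Redirect>`. With `useEffect`, the guarded children render and paint once before the effect fires, producing a brief flash of the old route; returning `<Redirect>` short-circuits the render so nothing below the guard is ever painted. A minimal sketch of the pattern (illustrative names, not the PR's code):

```typescript
import { Redirect } from 'react-router-dom';

// Illustrative guard: the redirect decision is made during render, so the
// protected children are never mounted or painted in the disallowed case.
function Guard({
  allowed,
  children,
}: {
  allowed: boolean;
  children: JSX.Element;
}): JSX.Element {
  if (!allowed) {
    // react-router v5 <Redirect> replaces the history entry during render,
    // before the browser gets a chance to paint the old route.
    return <Redirect to="/login" />;
  }
  return children;
}
```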

View File

@@ -6,7 +6,6 @@ import { FeatureKeys } from 'constants/features';
import { LOCALSTORAGE } from 'constants/localStorage';
import { ORG_PREFERENCES } from 'constants/orgPreferences';
import ROUTES from 'constants/routes';
import history from 'lib/history';
import { AppContext } from 'providers/App/App';
import { IAppContext, IUser } from 'providers/App/types';
import {
@@ -22,19 +21,6 @@ import { ROLES, USER_ROLES } from 'types/roles';
import PrivateRoute from '../Private';
// Mock history module
jest.mock('lib/history', () => ({
__esModule: true,
default: {
push: jest.fn(),
location: { pathname: '/', search: '', hash: '' },
listen: jest.fn(),
createHref: jest.fn(),
},
}));
const mockHistoryPush = history.push as jest.Mock;
// Mock localStorage APIs
const mockLocalStorage: Record<string, string> = {};
jest.mock('api/browser/localstorage/get', () => ({
@@ -239,20 +225,18 @@ function renderPrivateRoute(options: RenderPrivateRouteOptions = {}): void {
}
// Generic assertion helpers for navigation behavior
// Using these allows easier refactoring when switching from history.push to Redirect component
// Using location-based assertions since Private.tsx now uses Redirect component
async function assertRedirectsTo(targetRoute: string): Promise<void> {
await waitFor(() => {
expect(mockHistoryPush).toHaveBeenCalledWith(targetRoute);
expect(screen.getByTestId('location-display')).toHaveTextContent(targetRoute);
});
}
function assertNoRedirect(): void {
expect(mockHistoryPush).not.toHaveBeenCalled();
}
function assertDoesNotRedirectTo(targetRoute: string): void {
expect(mockHistoryPush).not.toHaveBeenCalledWith(targetRoute);
function assertStaysOnRoute(expectedRoute: string): void {
expect(screen.getByTestId('location-display')).toHaveTextContent(
expectedRoute,
);
}
function assertRendersChildren(): void {
@@ -350,7 +334,7 @@ describe('PrivateRoute', () => {
});
assertRendersChildren();
assertNoRedirect();
assertStaysOnRoute('/public/dashboard/abc123');
});
it('should render children for public dashboard route when logged in without redirecting', () => {
@@ -362,7 +346,7 @@ describe('PrivateRoute', () => {
assertRendersChildren();
// Critical: without the isPublicDashboard early return, logged-in users
// would be redirected to HOME due to the non-private route handling
assertNoRedirect();
assertStaysOnRoute('/public/dashboard/abc123');
});
});
@@ -420,7 +404,7 @@ describe('PrivateRoute', () => {
});
assertRendersChildren();
assertNoRedirect();
assertStaysOnRoute(ROUTES.HOME);
});
it('should redirect to unauthorized when VIEWER tries to access admin-only route /alerts/new', async () => {
@@ -529,7 +513,7 @@ describe('PrivateRoute', () => {
appContext: { isLoggedIn: true },
});
assertDoesNotRedirectTo(ROUTES.HOME);
assertStaysOnRoute(ROUTES.SOMETHING_WENT_WRONG);
});
});
@@ -541,7 +525,7 @@ describe('PrivateRoute', () => {
});
// Should not redirect - login page handles its own routing
assertNoRedirect();
assertStaysOnRoute(ROUTES.LOGIN);
});
it('should not redirect when not logged in user visits signup page', () => {
@@ -550,7 +534,7 @@ describe('PrivateRoute', () => {
appContext: { isLoggedIn: false },
});
assertNoRedirect();
assertStaysOnRoute(ROUTES.SIGN_UP);
});
it('should not redirect when not logged in user visits password reset page', () => {
@@ -559,7 +543,7 @@ describe('PrivateRoute', () => {
appContext: { isLoggedIn: false },
});
assertNoRedirect();
assertStaysOnRoute(ROUTES.PASSWORD_RESET);
});
it('should not redirect when not logged in user visits forgot password page', () => {
@@ -568,7 +552,7 @@ describe('PrivateRoute', () => {
appContext: { isLoggedIn: false },
});
assertNoRedirect();
assertStaysOnRoute(ROUTES.FORGOT_PASSWORD);
});
});
@@ -657,7 +641,7 @@ describe('PrivateRoute', () => {
});
// Admin should be able to access settings even when workspace is blocked
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.SETTINGS);
});
it('should allow ADMIN to access /settings/billing when workspace is blocked', () => {
@@ -673,7 +657,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.BILLING);
});
it('should allow ADMIN to access /settings/org-settings when workspace is blocked', () => {
@@ -689,7 +673,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.ORG_SETTINGS);
});
it('should allow ADMIN to access /settings/members when workspace is blocked', () => {
@@ -705,7 +689,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.MEMBERS_SETTINGS);
});
it('should allow ADMIN to access /settings/my-settings when workspace is blocked', () => {
@@ -721,7 +705,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.MY_SETTINGS);
});
it('should redirect VIEWER to workspace locked even when trying to access settings', async () => {
@@ -832,7 +816,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.WORKSPACE_LOCKED);
});
it('should not redirect self-hosted users to workspace locked even when workSpaceBlock is true', () => {
@@ -849,7 +833,7 @@ describe('PrivateRoute', () => {
isCloudUser: false,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.HOME);
});
});
@@ -919,7 +903,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
assertStaysOnRoute(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
});
it('should not redirect self-hosted users to workspace access restricted when license is terminated', () => {
@@ -936,7 +920,7 @@ describe('PrivateRoute', () => {
isCloudUser: false,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
assertStaysOnRoute(ROUTES.HOME);
});
it('should not redirect when license is ACTIVE', () => {
@@ -953,7 +937,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
assertStaysOnRoute(ROUTES.HOME);
});
it('should not redirect when license is EVALUATING', () => {
@@ -970,7 +954,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
assertStaysOnRoute(ROUTES.HOME);
});
});
@@ -1006,7 +990,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_SUSPENDED);
assertStaysOnRoute(ROUTES.WORKSPACE_SUSPENDED);
});
it('should not redirect self-hosted users to workspace suspended when license is defaulted', () => {
@@ -1023,7 +1007,7 @@ describe('PrivateRoute', () => {
isCloudUser: false,
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_SUSPENDED);
assertStaysOnRoute(ROUTES.HOME);
});
});
@@ -1043,6 +1027,11 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
// Wait for the users query to complete and trigger re-render
await act(async () => {
await Promise.resolve();
});
await assertRedirectsTo(ROUTES.ONBOARDING);
});
@@ -1058,7 +1047,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.HOME);
});
it('should not redirect to onboarding when onboarding is already complete', async () => {
@@ -1084,7 +1073,7 @@ describe('PrivateRoute', () => {
// Critical: if isOnboardingComplete check is broken (always false),
// this test would fail because all other conditions for redirect ARE met
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.HOME);
});
it('should not redirect to onboarding for non-cloud users', () => {
@@ -1099,7 +1088,7 @@ describe('PrivateRoute', () => {
isCloudUser: false,
});
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.HOME);
});
it('should not redirect to onboarding when on /workspace-locked route', () => {
@@ -1114,7 +1103,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.WORKSPACE_LOCKED);
});
it('should not redirect to onboarding when on /workspace-suspended route', () => {
@@ -1129,7 +1118,7 @@ describe('PrivateRoute', () => {
isCloudUser: true,
});
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.WORKSPACE_SUSPENDED);
});
it('should not redirect to onboarding when workspace is blocked and accessing billing', async () => {
@@ -1156,7 +1145,7 @@ describe('PrivateRoute', () => {
});
// Should NOT redirect to onboarding - user needs to access billing to fix payment
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.BILLING);
});
it('should not redirect to onboarding when workspace is blocked and accessing settings', async () => {
@@ -1180,7 +1169,7 @@ describe('PrivateRoute', () => {
await Promise.resolve();
});
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
assertStaysOnRoute(ROUTES.SETTINGS);
});
it('should not redirect to onboarding when workspace is suspended (DEFAULTED)', async () => {
@@ -1207,7 +1196,7 @@ describe('PrivateRoute', () => {
});
// Should redirect to WORKSPACE_SUSPENDED, not ONBOARDING
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
await assertRedirectsTo(ROUTES.WORKSPACE_SUSPENDED);
});
it('should not redirect to onboarding when workspace is access restricted (TERMINATED)', async () => {
@@ -1234,7 +1223,7 @@ describe('PrivateRoute', () => {
});
// Should redirect to WORKSPACE_ACCESS_RESTRICTED, not ONBOARDING
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
await assertRedirectsTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
});
it('should not redirect to onboarding when workspace is access restricted (EXPIRED)', async () => {
@@ -1260,7 +1249,7 @@ describe('PrivateRoute', () => {
await Promise.resolve();
});
assertDoesNotRedirectTo(ROUTES.ONBOARDING);
await assertRedirectsTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
});
});
@@ -1302,7 +1291,7 @@ describe('PrivateRoute', () => {
},
});
assertDoesNotRedirectTo(ROUTES.GET_STARTED_WITH_CLOUD);
assertStaysOnRoute(ROUTES.GET_STARTED);
});
it('should not redirect when on GET_STARTED and ONBOARDING_V3 feature flag is not present', () => {
@@ -1314,7 +1303,7 @@ describe('PrivateRoute', () => {
},
});
assertDoesNotRedirectTo(ROUTES.GET_STARTED_WITH_CLOUD);
assertStaysOnRoute(ROUTES.GET_STARTED);
});
it('should not redirect when on different route even if ONBOARDING_V3 is active', () => {
@@ -1334,7 +1323,7 @@ describe('PrivateRoute', () => {
},
});
assertDoesNotRedirectTo(ROUTES.GET_STARTED_WITH_CLOUD);
assertStaysOnRoute(ROUTES.HOME);
});
});
@@ -1350,7 +1339,7 @@ describe('PrivateRoute', () => {
},
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertStaysOnRoute(ROUTES.HOME);
});
it('should not fetch users when org data is not available', () => {
@@ -1393,9 +1382,7 @@ describe('PrivateRoute', () => {
},
});
assertDoesNotRedirectTo(ROUTES.WORKSPACE_LOCKED);
assertDoesNotRedirectTo(ROUTES.WORKSPACE_SUSPENDED);
assertDoesNotRedirectTo(ROUTES.WORKSPACE_ACCESS_RESTRICTED);
assertStaysOnRoute(ROUTES.HOME);
});
});
@@ -1436,22 +1423,40 @@ describe('PrivateRoute', () => {
await assertRedirectsTo(ROUTES.UN_AUTHORIZED);
});
it('should allow all roles to access /services route', () => {
const roles = [USER_ROLES.ADMIN, USER_ROLES.EDITOR, USER_ROLES.VIEWER];
roles.forEach((role) => {
jest.clearAllMocks();
renderPrivateRoute({
initialRoute: ROUTES.APPLICATION,
appContext: {
isLoggedIn: true,
user: createMockUser({ role: role as ROLES }),
},
});
assertDoesNotRedirectTo(ROUTES.UN_AUTHORIZED);
it('should allow ADMIN to access /services route', () => {
renderPrivateRoute({
initialRoute: ROUTES.APPLICATION,
appContext: {
isLoggedIn: true,
user: createMockUser({ role: USER_ROLES.ADMIN as ROLES }),
},
});
assertStaysOnRoute(ROUTES.APPLICATION);
});
it('should allow EDITOR to access /services route', () => {
renderPrivateRoute({
initialRoute: ROUTES.APPLICATION,
appContext: {
isLoggedIn: true,
user: createMockUser({ role: USER_ROLES.EDITOR as ROLES }),
},
});
assertStaysOnRoute(ROUTES.APPLICATION);
});
it('should allow VIEWER to access /services route', () => {
renderPrivateRoute({
initialRoute: ROUTES.APPLICATION,
appContext: {
isLoggedIn: true,
user: createMockUser({ role: USER_ROLES.VIEWER as ROLES }),
},
});
assertStaysOnRoute(ROUTES.APPLICATION);
});
it('should redirect VIEWER from /onboarding route (admin only)', async () => {
@@ -1481,7 +1486,7 @@ describe('PrivateRoute', () => {
});
assertRendersChildren();
assertDoesNotRedirectTo(ROUTES.UN_AUTHORIZED);
assertStaysOnRoute(ROUTES.CHANNELS_NEW);
});
it('should allow EDITOR to access /get-started route', () => {
@@ -1493,7 +1498,7 @@ describe('PrivateRoute', () => {
},
});
assertDoesNotRedirectTo(ROUTES.UN_AUTHORIZED);
assertStaysOnRoute(ROUTES.GET_STARTED);
});
});
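The assertion helpers now read the router's current location from a `location-display` test element instead of spying on `history.push`. The probe component itself is not shown in this diff; a typical implementation looks like the following (an assumption about the test harness, not the PR's exact code):

```typescript
import { useLocation } from 'react-router-dom';

// Rendered next to <PrivateRoute> inside the test router so assertions can
// read the current pathname via screen.getByTestId('location-display').
function LocationDisplay(): JSX.Element {
  const { pathname } = useLocation();
  return <div data-testid="location-display">{pathname}</div>;
}
```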

View File

@@ -33,6 +33,8 @@ import type {
DisconnectAccountPathParameters,
GetAccount200,
GetAccountPathParameters,
GetConnectionCredentials200,
GetConnectionCredentialsPathParameters,
GetService200,
GetServicePathParameters,
ListAccounts200,
@@ -628,6 +630,103 @@ export const useUpdateAccount = <
return useMutation(mutationOptions);
};
/**
* This endpoint updates a service for the specified cloud provider
* @summary Update service
*/
export const updateService = (
{ cloudProvider, id, serviceId }: UpdateServicePathParameters,
cloudintegrationtypesUpdatableServiceDTO: BodyType<CloudintegrationtypesUpdatableServiceDTO>,
) => {
return GeneratedAPIInstance<void>({
url: `/api/v1/cloud_integrations/${cloudProvider}/accounts/${id}/services/${serviceId}`,
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
data: cloudintegrationtypesUpdatableServiceDTO,
});
};
export const getUpdateServiceMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
> => {
const mutationKey = ['updateService'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof updateService>>,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
}
> = (props) => {
const { pathParams, data } = props ?? {};
return updateService(pathParams, data);
};
return { mutationFn, ...mutationOptions };
};
export type UpdateServiceMutationResult = NonNullable<
Awaited<ReturnType<typeof updateService>>
>;
export type UpdateServiceMutationBody = BodyType<CloudintegrationtypesUpdatableServiceDTO>;
export type UpdateServiceMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Update service
*/
export const useUpdateService = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
> => {
const mutationOptions = getUpdateServiceMutationOptions(options);
return useMutation(mutationOptions);
};
/**
* This endpoint is called by the deployed agent to check in
* @summary Agent check-in
@@ -727,6 +826,114 @@ export const useAgentCheckIn = <
return useMutation(mutationOptions);
};
/**
* This endpoint retrieves the connection credentials required for integration
* @summary Get connection credentials
*/
export const getConnectionCredentials = (
{ cloudProvider }: GetConnectionCredentialsPathParameters,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<GetConnectionCredentials200>({
url: `/api/v1/cloud_integrations/${cloudProvider}/credentials`,
method: 'GET',
signal,
});
};
export const getGetConnectionCredentialsQueryKey = ({
cloudProvider,
}: GetConnectionCredentialsPathParameters) => {
return [`/api/v1/cloud_integrations/${cloudProvider}/credentials`] as const;
};
export const getGetConnectionCredentialsQueryOptions = <
TData = Awaited<ReturnType<typeof getConnectionCredentials>>,
TError = ErrorType<RenderErrorResponseDTO>
>(
{ cloudProvider }: GetConnectionCredentialsPathParameters,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof getConnectionCredentials>>,
TError,
TData
>;
},
) => {
const { query: queryOptions } = options ?? {};
const queryKey =
queryOptions?.queryKey ??
getGetConnectionCredentialsQueryKey({ cloudProvider });
const queryFn: QueryFunction<
Awaited<ReturnType<typeof getConnectionCredentials>>
> = ({ signal }) => getConnectionCredentials({ cloudProvider }, signal);
return {
queryKey,
queryFn,
enabled: !!cloudProvider,
...queryOptions,
} as UseQueryOptions<
Awaited<ReturnType<typeof getConnectionCredentials>>,
TError,
TData
> & { queryKey: QueryKey };
};
export type GetConnectionCredentialsQueryResult = NonNullable<
Awaited<ReturnType<typeof getConnectionCredentials>>
>;
export type GetConnectionCredentialsQueryError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Get connection credentials
*/
export function useGetConnectionCredentials<
TData = Awaited<ReturnType<typeof getConnectionCredentials>>,
TError = ErrorType<RenderErrorResponseDTO>
>(
{ cloudProvider }: GetConnectionCredentialsPathParameters,
options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof getConnectionCredentials>>,
TError,
TData
>;
},
): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
const queryOptions = getGetConnectionCredentialsQueryOptions(
{ cloudProvider },
options,
);
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
queryKey: QueryKey;
};
query.queryKey = queryOptions.queryKey;
return query;
}
/**
* @summary Get connection credentials
*/
export const invalidateGetConnectionCredentials = async (
queryClient: QueryClient,
{ cloudProvider }: GetConnectionCredentialsPathParameters,
options?: InvalidateOptions,
): Promise<QueryClient> => {
await queryClient.invalidateQueries(
{ queryKey: getGetConnectionCredentialsQueryKey({ cloudProvider }) },
options,
);
return queryClient;
};
/**
* This endpoint lists the services metadata for the specified cloud provider
* @summary List services metadata
@@ -941,101 +1148,3 @@ export const invalidateGetService = async (
return queryClient;
};
/**
* This endpoint updates a service for the specified cloud provider
* @summary Update service
*/
export const updateService = (
{ cloudProvider, serviceId }: UpdateServicePathParameters,
cloudintegrationtypesUpdatableServiceDTO: BodyType<CloudintegrationtypesUpdatableServiceDTO>,
) => {
return GeneratedAPIInstance<void>({
url: `/api/v1/cloud_integrations/${cloudProvider}/services/${serviceId}`,
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
data: cloudintegrationtypesUpdatableServiceDTO,
});
};
export const getUpdateServiceMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
> => {
const mutationKey = ['updateService'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof updateService>>,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
}
> = (props) => {
const { pathParams, data } = props ?? {};
return updateService(pathParams, data);
};
return { mutationFn, ...mutationOptions };
};
export type UpdateServiceMutationResult = NonNullable<
Awaited<ReturnType<typeof updateService>>
>;
export type UpdateServiceMutationBody = BodyType<CloudintegrationtypesUpdatableServiceDTO>;
export type UpdateServiceMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Update service
*/
export const useUpdateService = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof updateService>>,
TError,
{
pathParams: UpdateServicePathParameters;
data: BodyType<CloudintegrationtypesUpdatableServiceDTO>;
},
TContext
> => {
const mutationOptions = getUpdateServiceMutationOptions(options);
return useMutation(mutationOptions);
};
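With the client regenerated, `updateService` takes the extra `id` path parameter and `useGetConnectionCredentials` is available as a query hook. A sketch of consuming both (the import path and the mutation payload are illustrative; the hook signatures come from the generated code above):

```typescript
import { useGetConnectionCredentials, useUpdateService } from './cloudIntegrationsService';

function AwsIntegrationPanel(): JSX.Element {
  // The query key is derived from the provider path, so switching providers
  // refetches automatically.
  const { data } = useGetConnectionCredentials({ cloudProvider: 'aws' });

  const { mutate: updateService } = useUpdateService();

  const onSave = (): void =>
    updateService({
      // pathParams now include the account id, matching the relocated route.
      pathParams: { cloudProvider: 'aws', id: 'account-1', serviceId: 'rds' },
      // Body typed as CloudintegrationtypesUpdatableServiceDTO; its shape is
      // not shown in this diff, so it is elided here.
      data: {},
    });

  return (
    <button type="button" onClick={onSave}>
      {data?.data.ingestionUrl ?? 'loading…'}
    </button>
  );
}
```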

View File

@@ -1,104 +0,0 @@
/**
* ! Do not edit manually
* * The file has been auto-generated using Orval for SigNoz
* * regenerate with 'yarn generate:api'
* SigNoz
*/
import type {
MutationFunction,
UseMutationOptions,
UseMutationResult,
} from 'react-query';
import { useMutation } from 'react-query';
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
HostsList200,
InframonitoringtypesHostsListRequestDTO,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
/**
* This endpoint returns a list of hosts along with other information for each of them
* @summary List Hosts for Infra Monitoring
*/
export const hostsList = (
inframonitoringtypesHostsListRequestDTO: BodyType<InframonitoringtypesHostsListRequestDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<HostsList200>({
url: `/api/v2/infra-monitoring/hosts/list`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesHostsListRequestDTO,
signal,
});
};
export const getHostsListMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof hostsList>>,
TError,
{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof hostsList>>,
TError,
{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
TContext
> => {
const mutationKey = ['hostsList'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof hostsList>>,
{ data: BodyType<InframonitoringtypesHostsListRequestDTO> }
> = (props) => {
const { data } = props ?? {};
return hostsList(data);
};
return { mutationFn, ...mutationOptions };
};
export type HostsListMutationResult = NonNullable<
Awaited<ReturnType<typeof hostsList>>
>;
export type HostsListMutationBody = BodyType<InframonitoringtypesHostsListRequestDTO>;
export type HostsListMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Hosts for Infra Monitoring
*/
export const useHostsList = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof hostsList>>,
TError,
{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof hostsList>>,
TError,
{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
TContext
> => {
const mutationOptions = getHostsListMutationOptions(options);
return useMutation(mutationOptions);
};

View File

@@ -517,12 +517,12 @@ export type CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets = {
};
export interface CloudintegrationtypesAWSCollectionStrategyDTO {
aws_logs?: CloudintegrationtypesAWSLogsStrategyDTO;
aws_metrics?: CloudintegrationtypesAWSMetricsStrategyDTO;
logs?: CloudintegrationtypesAWSLogsStrategyDTO;
metrics?: CloudintegrationtypesAWSMetricsStrategyDTO;
/**
* @type object
*/
s3_buckets?: CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets;
s3Buckets?: CloudintegrationtypesAWSCollectionStrategyDTOS3Buckets;
}
export interface CloudintegrationtypesAWSConnectionArtifactDTO {
@@ -555,11 +555,11 @@ export type CloudintegrationtypesAWSLogsStrategyDTOCloudwatchLogsSubscriptionsIt
/**
* @type string
*/
filter_pattern?: string;
filterPattern?: string;
/**
* @type string
*/
log_group_name_prefix?: string;
logGroupNamePrefix?: string;
};
export interface CloudintegrationtypesAWSLogsStrategyDTO {
@@ -567,7 +567,7 @@ export interface CloudintegrationtypesAWSLogsStrategyDTO {
* @type array
* @nullable true
*/
cloudwatch_logs_subscriptions?:
cloudwatchLogsSubscriptions?:
| CloudintegrationtypesAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem[]
| null;
}
@@ -588,7 +588,7 @@ export interface CloudintegrationtypesAWSMetricsStrategyDTO {
* @type array
* @nullable true
*/
cloudwatch_metric_stream_filters?:
cloudwatchMetricStreamFilters?:
| CloudintegrationtypesAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem[]
| null;
}
@@ -610,7 +610,7 @@ export interface CloudintegrationtypesAWSServiceLogsConfigDTO {
/**
* @type object
*/
s3_buckets?: CloudintegrationtypesAWSServiceLogsConfigDTOS3Buckets;
s3Buckets?: CloudintegrationtypesAWSServiceLogsConfigDTOS3Buckets;
}
export interface CloudintegrationtypesAWSServiceMetricsConfigDTO {
@@ -693,6 +693,32 @@ export interface CloudintegrationtypesAssetsDTO {
dashboards?: CloudintegrationtypesDashboardDTO[] | null;
}
/**
* @nullable
*/
export type CloudintegrationtypesCloudIntegrationServiceDTO = {
/**
* @type string
*/
cloudIntegrationId?: string;
config?: CloudintegrationtypesServiceConfigDTO;
/**
* @type string
* @format date-time
*/
createdAt?: Date;
/**
* @type string
*/
id: string;
type?: CloudintegrationtypesServiceIDDTO;
/**
* @type string
* @format date-time
*/
updatedAt?: Date;
} | null;
export interface CloudintegrationtypesCollectedLogAttributeDTO {
/**
* @type string
@@ -736,6 +762,11 @@ export interface CloudintegrationtypesConnectionArtifactDTO {
}
export interface CloudintegrationtypesConnectionArtifactRequestDTO {
config: CloudintegrationtypesConnectionArtifactRequestConfigDTO;
credentials: CloudintegrationtypesSignozCredentialsDTO;
}
export interface CloudintegrationtypesConnectionArtifactRequestConfigDTO {
aws: CloudintegrationtypesAWSConnectionArtifactRequestDTO;
}
@@ -831,9 +862,68 @@ export type CloudintegrationtypesIntegrationConfigDTO = {
* @type array
*/
enabled_regions: string[];
telemetry: CloudintegrationtypesAWSCollectionStrategyDTO;
telemetry: CloudintegrationtypesOldAWSCollectionStrategyDTO;
} | null;
export type CloudintegrationtypesOldAWSCollectionStrategyDTOS3Buckets = {
[key: string]: string[];
};
export interface CloudintegrationtypesOldAWSCollectionStrategyDTO {
aws_logs?: CloudintegrationtypesOldAWSLogsStrategyDTO;
aws_metrics?: CloudintegrationtypesOldAWSMetricsStrategyDTO;
/**
* @type string
*/
provider?: string;
/**
* @type object
*/
s3_buckets?: CloudintegrationtypesOldAWSCollectionStrategyDTOS3Buckets;
}
export type CloudintegrationtypesOldAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem = {
/**
* @type string
*/
filter_pattern?: string;
/**
* @type string
*/
log_group_name_prefix?: string;
};
export interface CloudintegrationtypesOldAWSLogsStrategyDTO {
/**
* @type array
* @nullable true
*/
cloudwatch_logs_subscriptions?:
| CloudintegrationtypesOldAWSLogsStrategyDTOCloudwatchLogsSubscriptionsItem[]
| null;
}
export type CloudintegrationtypesOldAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem = {
/**
* @type array
*/
MetricNames?: string[];
/**
* @type string
*/
Namespace?: string;
};
export interface CloudintegrationtypesOldAWSMetricsStrategyDTO {
/**
* @type array
* @nullable true
*/
cloudwatch_metric_stream_filters?:
| CloudintegrationtypesOldAWSMetricsStrategyDTOCloudwatchMetricStreamFiltersItem[]
| null;
}
/**
* @nullable
*/
@@ -871,6 +961,7 @@ export interface CloudintegrationtypesProviderIntegrationConfigDTO {
export interface CloudintegrationtypesServiceDTO {
assets: CloudintegrationtypesAssetsDTO;
cloudIntegrationService: CloudintegrationtypesCloudIntegrationServiceDTO;
dataCollected: CloudintegrationtypesDataCollectedDTO;
/**
* @type string
@@ -884,8 +975,7 @@ export interface CloudintegrationtypesServiceDTO {
* @type string
*/
overview: string;
serviceConfig?: CloudintegrationtypesServiceConfigDTO;
supported_signals: CloudintegrationtypesSupportedSignalsDTO;
supportedSignals: CloudintegrationtypesSupportedSignalsDTO;
telemetryCollectionStrategy: CloudintegrationtypesCollectionStrategyDTO;
/**
* @type string
@@ -897,6 +987,21 @@ export interface CloudintegrationtypesServiceConfigDTO {
aws: CloudintegrationtypesAWSServiceConfigDTO;
}
export enum CloudintegrationtypesServiceIDDTO {
alb = 'alb',
'api-gateway' = 'api-gateway',
dynamodb = 'dynamodb',
ec2 = 'ec2',
ecs = 'ecs',
eks = 'eks',
elasticache = 'elasticache',
lambda = 'lambda',
msk = 'msk',
rds = 'rds',
s3sync = 's3sync',
sns = 'sns',
sqs = 'sqs',
}
export interface CloudintegrationtypesServiceMetadataDTO {
/**
* @type boolean
@@ -916,6 +1021,25 @@ export interface CloudintegrationtypesServiceMetadataDTO {
title: string;
}
export interface CloudintegrationtypesSignozCredentialsDTO {
/**
* @type string
*/
ingestionKey: string;
/**
* @type string
*/
ingestionUrl: string;
/**
* @type string
*/
sigNozApiKey: string;
/**
* @type string
*/
sigNozApiURL: string;
}
export interface CloudintegrationtypesSupportedSignalsDTO {
/**
* @type boolean
@@ -1330,112 +1454,6 @@ export interface GlobaltypesTokenizerConfigDTO {
enabled?: boolean;
}
/**
* @nullable
*/
export type InframonitoringtypesHostRecordDTOMeta = {
[key: string]: unknown;
} | null;
export interface InframonitoringtypesHostRecordDTO {
/**
* @type number
* @format double
*/
cpu?: number;
/**
* @type number
* @format double
*/
diskUsage?: number;
/**
* @type string
*/
hostName?: string;
/**
* @type number
* @format double
*/
load15?: number;
/**
* @type number
* @format double
*/
memory?: number;
/**
* @type object
* @nullable true
*/
meta?: InframonitoringtypesHostRecordDTOMeta;
/**
* @type string
*/
status?: string;
/**
* @type number
* @format double
*/
wait?: number;
}
export enum InframonitoringtypesHostStatusDTO {
active = 'active',
inactive = 'inactive',
'' = '',
}
export interface InframonitoringtypesHostsListRequestDTO {
/**
* @type integer
* @format int64
*/
end?: number;
filter?: Querybuildertypesv5FilterDTO;
filterByStatus?: InframonitoringtypesHostStatusDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit?: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start?: number;
}
export interface InframonitoringtypesHostsListResponseDTO {
/**
* @type boolean
*/
endTimeBeforeRetention?: boolean;
/**
* @type array
* @nullable true
*/
records?: InframonitoringtypesHostRecordDTO[] | null;
/**
* @type boolean
*/
sentAnyMetricsData?: boolean;
/**
* @type integer
*/
total?: number;
/**
* @type string
*/
type?: string;
}
export interface MetricsexplorertypesInspectMetricsRequestDTO {
/**
* @type integer
@@ -3605,6 +3623,11 @@ export type UpdateAccountPathParameters = {
cloudProvider: string;
id: string;
};
export type UpdateServicePathParameters = {
cloudProvider: string;
id: string;
serviceId: string;
};
export type AgentCheckInPathParameters = {
cloudProvider: string;
};
@@ -3616,6 +3639,17 @@ export type AgentCheckIn200 = {
status: string;
};
export type GetConnectionCredentialsPathParameters = {
cloudProvider: string;
};
export type GetConnectionCredentials200 = {
data: CloudintegrationtypesSignozCredentialsDTO;
/**
* @type string
*/
status: string;
};
export type ListServicesMetadataPathParameters = {
cloudProvider: string;
};
@@ -3639,10 +3673,6 @@ export type GetService200 = {
status: string;
};
export type UpdateServicePathParameters = {
cloudProvider: string;
serviceId: string;
};
export type CreateSessionByGoogleCallback303 = {
data: AuthtypesGettableTokenDTO;
/**
@@ -4298,14 +4328,6 @@ export type Healthz503 = {
status: string;
};
export type HostsList200 = {
data: InframonitoringtypesHostsListResponseDTO;
/**
* @type string
*/
status: string;
};
export type Livez200 = {
data: FactoryResponseDTO;
/**
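Two schema-level changes above are worth calling out: `UpdateServicePathParameters` gains the `id` field (the old two-field variant is removed), and service identifiers become the closed `CloudintegrationtypesServiceIDDTO` enum rather than free-form strings. A small sketch of what the enum buys (illustrative helper, not from the PR):

```typescript
import { CloudintegrationtypesServiceIDDTO } from './sigNoz.schemas';

// With a closed enum, service ids are compared against known members instead
// of raw strings, so typos fail to compile.
function isManagedDatabase(id: CloudintegrationtypesServiceIDDTO): boolean {
  return (
    id === CloudintegrationtypesServiceIDDTO.rds ||
    id === CloudintegrationtypesServiceIDDTO.dynamodb ||
    id === CloudintegrationtypesServiceIDDTO.elasticache
  );
}
```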

View File

@@ -13,7 +13,7 @@ import uPlot from 'uplot';
import { ChartProps } from '../types';
const TOOLTIP_WIDTH_PADDING = 60;
const TOOLTIP_WIDTH_PADDING = 120;
const TOOLTIP_MIN_WIDTH = 200;
export default function ChartWrapper({

View File

@@ -0,0 +1,72 @@
.uplot-tooltip-container {
font-family: 'Inter';
font-size: 12px;
background: var(--bg-ink-300);
-webkit-font-smoothing: antialiased;
color: var(--bg-vanilla-100);
border-radius: 6px;
border: 1px solid var(--bg-ink-100);
display: flex;
flex-direction: column;
gap: 8px;
&.lightMode {
background: var(--bg-vanilla-100);
color: var(--bg-ink-500);
border: 1px solid var(--bg-vanilla-300);
.uplot-tooltip-list {
&::-webkit-scrollbar-thumb {
background: var(--bg-vanilla-400);
}
}
.uplot-tooltip-divider {
background-color: var(--bg-vanilla-300);
}
}
.uplot-tooltip-header-container {
padding: 1rem 1rem 0 1rem;
display: flex;
flex-direction: column;
gap: 8px;
&:last-child {
padding-bottom: 1rem;
}
.uplot-tooltip-header {
font-size: 13px;
font-weight: 500;
}
}
.uplot-tooltip-divider {
width: 100%;
height: 1px;
background-color: var(--bg-ink-100);
}
.uplot-tooltip-list {
// Virtuoso absolutely positions its item rows; left: 0 prevents accidental
// horizontal offset when the scroller has padding or transform applied.
div[data-viewport-type='element'] {
left: 0;
padding: 4px 8px 4px 16px;
}
&::-webkit-scrollbar {
width: 0.3rem;
}
&::-webkit-scrollbar-track {
background: transparent;
}
&::-webkit-scrollbar-thumb {
background: var(--bg-slate-100);
border-radius: 0.5rem;
}
}
}

View File

@@ -1,70 +0,0 @@
.uplot-tooltip-container {
font-family: 'Inter';
font-size: 12px;
background: var(--bg-ink-300);
-webkit-font-smoothing: antialiased;
color: var(--bg-vanilla-100);
border-radius: 6px;
padding: 1rem 0.5rem 0.5rem 1rem;
border: 1px solid var(--bg-ink-100);
display: flex;
flex-direction: column;
gap: 8px;
&.lightMode {
background: var(--bg-vanilla-100);
color: var(--bg-ink-500);
border: 1px solid var(--bg-vanilla-300);
.uplot-tooltip-list {
&::-webkit-scrollbar-thumb {
background: var(--bg-vanilla-400);
}
}
}
.uplot-tooltip-header {
font-size: 13px;
font-weight: 500;
}
.uplot-tooltip-list-container {
overflow-y: auto;
max-height: 330px;
.uplot-tooltip-list {
&::-webkit-scrollbar {
width: 0.3rem;
}
&::-webkit-scrollbar-track {
background: transparent;
}
&::-webkit-scrollbar-thumb {
background: var(--bg-slate-100);
border-radius: 0.5rem;
}
}
}
.uplot-tooltip-item {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 4px;
.uplot-tooltip-item-marker {
border-radius: 50%;
border-width: 2px;
width: 12px;
height: 12px;
flex-shrink: 0;
}
.uplot-tooltip-item-content {
white-space: wrap;
word-break: break-all;
}
}
}

View File

@@ -7,12 +7,14 @@ import { useIsDarkMode } from 'hooks/useDarkMode';
import { useTimezone } from 'providers/Timezone';
import { TooltipProps } from '../types';
import TooltipItem from './components/TooltipItem/TooltipItem';
import './Tooltip.styles.scss';
import Styles from './Tooltip.module.scss';
const TOOLTIP_LIST_MAX_HEIGHT = 330;
// Fallback per-item height used for the initial size estimate before
// Virtuoso reports the real total height via totalListHeightChanged.
const TOOLTIP_ITEM_HEIGHT = 38;
const TOOLTIP_LIST_PADDING = 10;
const LIST_MAX_HEIGHT = 300;
export default function Tooltip({
uPlotInstance,
@@ -21,27 +23,26 @@ export default function Tooltip({
showTooltipHeader = true,
}: TooltipProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const [listHeight, setListHeight] = useState(0);
const tooltipContent = content ?? [];
const { timezone: userTimezone } = useTimezone();
const [totalListHeight, setTotalListHeight] = useState(0);
const resolvedTimezone = useMemo(() => {
if (!timezone) {
return userTimezone.value;
}
return timezone.value;
}, [timezone, userTimezone]);
const tooltipContent = useMemo(() => content ?? [], [content]);
const resolvedTimezone = timezone?.value ?? userTimezone.value;
const headerTitle = useMemo(() => {
if (!showTooltipHeader) {
return null;
}
const data = uPlotInstance.data;
const cursorIdx = uPlotInstance.cursor.idx;
if (cursorIdx == null) {
return null;
}
return dayjs(data[0][cursorIdx] * 1000)
const timestamp = uPlotInstance.data[0]?.[cursorIdx];
if (timestamp == null) {
return null;
}
return dayjs(timestamp * 1000)
.tz(resolvedTimezone)
.format(DATE_TIME_FORMATS.MONTH_DATETIME_SECONDS);
}, [
@@ -51,60 +52,68 @@ export default function Tooltip({
showTooltipHeader,
]);
const virtuosoStyle = useMemo(() => {
return {
height:
listHeight > 0
? Math.min(listHeight + TOOLTIP_LIST_PADDING, TOOLTIP_LIST_MAX_HEIGHT)
: Math.min(
tooltipContent.length * TOOLTIP_ITEM_HEIGHT,
TOOLTIP_LIST_MAX_HEIGHT,
),
width: '100%',
};
}, [listHeight, tooltipContent.length]);
const activeItem = useMemo(
() => tooltipContent.find((item) => item.isActive) ?? null,
[tooltipContent],
);
// Use the measured height from Virtuoso when available; fall back to a
// per-item estimate on the first render. Math.ceil prevents a 1 px
// subpixel rounding gap from triggering a spurious scrollbar.
const virtuosoHeight = useMemo(() => {
return totalListHeight > 0
? Math.ceil(Math.min(totalListHeight, LIST_MAX_HEIGHT))
: Math.min(tooltipContent.length * TOOLTIP_ITEM_HEIGHT, LIST_MAX_HEIGHT);
}, [totalListHeight, tooltipContent.length]);
const showHeader = showTooltipHeader || activeItem != null;
// With a single series the active item is fully represented in the header —
// hide the divider and list to avoid showing a duplicate row.
const showList = tooltipContent.length > 1;
const showDivider = showList && showHeader;
return (
<div
className={cx(
'uplot-tooltip-container',
isDarkMode ? 'darkMode' : 'lightMode',
)}
className={cx(Styles.uplotTooltipContainer, !isDarkMode && Styles.lightMode)}
data-testid="uplot-tooltip-container"
>
{showTooltipHeader && (
<div className="uplot-tooltip-header" data-testid="uplot-tooltip-header">
<span>{headerTitle}</span>
{showHeader && (
<div className={Styles.uplotTooltipHeaderContainer}>
{showTooltipHeader && headerTitle && (
<div
className={Styles.uplotTooltipHeader}
data-testid="uplot-tooltip-header"
>
<span>{headerTitle}</span>
</div>
)}
{activeItem && (
<TooltipItem
item={activeItem}
isItemActive={true}
containerTestId="uplot-tooltip-pinned"
markerTestId="uplot-tooltip-pinned-marker"
contentTestId="uplot-tooltip-pinned-content"
/>
)}
</div>
)}
<div className="uplot-tooltip-list-container">
{tooltipContent.length > 0 ? (
<Virtuoso
className="uplot-tooltip-list"
data-testid="uplot-tooltip-list"
data={tooltipContent}
style={virtuosoStyle}
totalListHeightChanged={setListHeight}
itemContent={(_, item): JSX.Element => (
<div className="uplot-tooltip-item" data-testid="uplot-tooltip-item">
<div
className="uplot-tooltip-item-marker"
style={{ borderColor: item.color }}
data-is-legend-marker={true}
data-testid="uplot-tooltip-item-marker"
/>
<div
className="uplot-tooltip-item-content"
style={{ color: item.color, fontWeight: item.isActive ? 700 : 400 }}
data-testid="uplot-tooltip-item-content"
>
{item.label}: {item.tooltipValue}
</div>
</div>
)}
/>
) : null}
</div>
{showDivider && <span className={Styles.uplotTooltipDivider} />}
{showList && (
<Virtuoso
className={Styles.uplotTooltipList}
data-testid="uplot-tooltip-list"
data={tooltipContent}
style={{ height: virtuosoHeight, width: '100%' }}
totalListHeightChanged={setTotalListHeight}
itemContent={(_, item): JSX.Element => (
<TooltipItem item={item} isItemActive={false} />
)}
/>
)}
</div>
);
}
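
For reference, the height fallback above in isolation, using the same constants; a standalone sketch rather than the component code:

// Standalone sketch of the Virtuoso height fallback used in the component above.
const SKETCH_ITEM_HEIGHT = 38;
const SKETCH_LIST_MAX_HEIGHT = 300;

function computeListHeight(totalListHeight: number, itemCount: number): number {
  // Prefer the measured height once Virtuoso reports it; Math.ceil avoids a
  // 1 px subpixel rounding gap that would otherwise trigger a spurious scrollbar.
  return totalListHeight > 0
    ? Math.ceil(Math.min(totalListHeight, SKETCH_LIST_MAX_HEIGHT))
    : Math.min(itemCount * SKETCH_ITEM_HEIGHT, SKETCH_LIST_MAX_HEIGHT);
}

// computeListHeight(0, 3) === 114 (per-item estimate);
// computeListHeight(412.4, 20) === 300 (measured height, clamped).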

View File

@@ -133,46 +133,30 @@ describe('Tooltip', () => {
expect(screen.queryByText(unexpectedTitle)).not.toBeInTheDocument();
});
it('renders lightMode class when dark mode is disabled', () => {
it('renders single active item in header only, without a list', () => {
const uPlotInstance = createUPlotInstance(null);
mockUseIsDarkMode.mockReturnValue(false);
renderTooltip({ uPlotInstance });
const container = screen.getByTestId('uplot-tooltip-container');
expect(container).toHaveClass('lightMode');
expect(container).not.toHaveClass('darkMode');
});
it('renders darkMode class when dark mode is enabled', () => {
const uPlotInstance = createUPlotInstance(null);
mockUseIsDarkMode.mockReturnValue(true);
renderTooltip({ uPlotInstance });
const container = screen.getByTestId('uplot-tooltip-container');
expect(container).toHaveClass('darkMode');
expect(container).not.toHaveClass('lightMode');
});
it('renders tooltip items when content is provided', () => {
const uPlotInstance = createUPlotInstance(null);
const content = [createTooltipContent()];
const content = [createTooltipContent({ isActive: true })];
renderTooltip({ uPlotInstance, content });
const list = screen.queryByTestId('uplot-tooltip-list');
// Active item is shown in the header, not duplicated in a list
expect(screen.queryByTestId('uplot-tooltip-list')).toBeNull();
expect(screen.getByTestId('uplot-tooltip-pinned')).toBeInTheDocument();
const pinnedContent = screen.getByTestId('uplot-tooltip-pinned-content');
expect(pinnedContent).toHaveTextContent('Series A');
expect(pinnedContent).toHaveTextContent('10');
});
expect(list).not.toBeNull();
it('renders list when multiple series are present', () => {
const uPlotInstance = createUPlotInstance(null);
const content = [
createTooltipContent({ isActive: true }),
createTooltipContent({ label: 'Series B', isActive: false }),
];
const marker = screen.getByTestId('uplot-tooltip-item-marker');
const itemContent = screen.getByTestId('uplot-tooltip-item-content');
renderTooltip({ uPlotInstance, content });
expect(marker).toHaveStyle({ borderColor: '#ff0000' });
expect(itemContent).toHaveStyle({ color: '#ff0000', fontWeight: '700' });
expect(itemContent).toHaveTextContent('Series A: 10');
expect(screen.getByTestId('uplot-tooltip-list')).toBeInTheDocument();
});
it('does not render tooltip list when content is empty', () => {
@@ -192,7 +176,7 @@ describe('Tooltip', () => {
renderTooltip({ uPlotInstance, content });
const list = screen.getByTestId('uplot-tooltip-list');
expect(list).toHaveStyle({ height: '210px' });
expect(list).toHaveStyle({ height: '200px' });
});
it('sets tooltip list height based on content length when Virtuoso reports 0 height', () => {

View File

@@ -189,7 +189,7 @@ describe('Tooltip utils', () => {
];
}
it('builds tooltip content with active series first', () => {
it('builds tooltip content in series-index order with isActive flag set correctly', () => {
const data: AlignedData = [[0], [10], [20], [30]];
const series = createSeriesConfig();
const dataIndexes = [null, 0, 0, 0];
@@ -206,21 +206,21 @@ describe('Tooltip utils', () => {
});
expect(result).toHaveLength(2);
// Active (series index 2) should come first
// Series are returned in series-index order (A=index 1 before B=index 2)
expect(result[0]).toMatchObject<Partial<TooltipContentItem>>({
label: 'B',
value: 20,
tooltipValue: 'formatted-20',
color: 'color-2',
isActive: true,
});
expect(result[1]).toMatchObject<Partial<TooltipContentItem>>({
label: 'A',
value: 10,
tooltipValue: 'formatted-10',
color: '#ff0000',
isActive: false,
});
expect(result[1]).toMatchObject<Partial<TooltipContentItem>>({
label: 'B',
value: 20,
tooltipValue: 'formatted-20',
color: 'color-2',
isActive: true,
});
});
it('skips series with null data index or non-finite values', () => {
@@ -273,5 +273,31 @@ describe('Tooltip utils', () => {
expect(result[0].value).toBe(30);
expect(result[1].value).toBe(30);
});
it('returns items in series-index order', () => {
// Series values in non-sorted order: 3, 1, 4, 2
const data: AlignedData = [[0], [3], [1], [4], [2]];
const series: Series[] = [
{ label: 'x', show: true } as Series,
{ label: 'C', show: true, stroke: '#aaaaaa' } as Series,
{ label: 'A', show: true, stroke: '#bbbbbb' } as Series,
{ label: 'D', show: true, stroke: '#cccccc' } as Series,
{ label: 'B', show: true, stroke: '#dddddd' } as Series,
];
const dataIndexes = [null, 0, 0, 0, 0];
const u = createUPlotInstance();
const result = buildTooltipContent({
data,
series,
dataIndexes,
activeSeriesIndex: null,
uPlotInstance: u,
yAxisUnit,
decimalPrecision,
});
expect(result.map((item) => item.value)).toEqual([3, 1, 4, 2]);
});
});
});

View File

@@ -0,0 +1,36 @@
.uplot-tooltip-item {
display: flex;
align-items: center;
gap: 8px;
padding: 4px 0;
.uplot-tooltip-item-marker {
border-radius: 50%;
border-style: solid;
border-width: 2px;
width: 12px;
height: 12px;
flex-shrink: 0;
}
.uplot-tooltip-item-content {
width: 100%;
display: flex;
align-items: center;
gap: 8px;
justify-content: space-between;
.uplot-tooltip-item-label {
white-space: normal;
overflow-wrap: anywhere;
}
&-separator {
flex: 1;
border-width: 0.5px;
border-style: dashed;
min-width: 24px;
opacity: 0.5;
}
}
}

View File

@@ -0,0 +1,49 @@
import { TooltipContentItem } from '../../../types';
import Styles from './TooltipItem.module.scss';
interface TooltipItemProps {
item: TooltipContentItem;
isItemActive: boolean;
containerTestId?: string;
markerTestId?: string;
contentTestId?: string;
}
export default function TooltipItem({
item,
isItemActive,
containerTestId = 'uplot-tooltip-item',
markerTestId = 'uplot-tooltip-item-marker',
contentTestId = 'uplot-tooltip-item-content',
}: TooltipItemProps): JSX.Element {
return (
<div
className={Styles.uplotTooltipItem}
style={{
opacity: isItemActive ? 1 : 0.7,
fontWeight: isItemActive ? 700 : 400,
}}
data-testid={containerTestId}
>
<div
className={Styles.uplotTooltipItemMarker}
style={{ borderColor: item.color }}
data-is-legend-marker={true}
data-testid={markerTestId}
/>
<div
className={Styles.uplotTooltipItemContent}
style={{ color: item.color }}
data-testid={contentTestId}
>
<span className={Styles.uplotTooltipItemLabel}>{item.label}</span>
<span
className={Styles.uplotTooltipItemContentSeparator}
style={{ borderColor: item.color }}
/>
<span>{item.tooltipValue}</span>
</div>
</div>
);
}

View File

@@ -38,16 +38,16 @@ export function getTooltipBaseValue({
// When series are hidden, we must use the next *visible* series, not index+1,
// since hidden series keep raw values and would produce negative/wrong results.
if (isStackedBarChart && baseValue !== null && series) {
let nextVisibleIdx = -1;
for (let j = index + 1; j < series.length; j++) {
if (series[j]?.show) {
nextVisibleIdx = j;
let nextVisibleSeriesIdx = -1;
for (let seriesIdx = index + 1; seriesIdx < series.length; seriesIdx++) {
if (series[seriesIdx]?.show) {
nextVisibleSeriesIdx = seriesIdx;
break;
}
}
if (nextVisibleIdx >= 1) {
const nextValue = data[nextVisibleIdx][dataIndex] ?? 0;
baseValue = baseValue - nextValue;
if (nextVisibleSeriesIdx >= 1) {
const nextStackedValue = data[nextVisibleSeriesIdx][dataIndex] ?? 0;
baseValue = baseValue - nextStackedValue;
}
}
return baseValue;
@@ -72,16 +72,15 @@ export function buildTooltipContent({
decimalPrecision?: PrecisionOption;
isStackedBarChart?: boolean;
}): TooltipContentItem[] {
const active: TooltipContentItem[] = [];
const rest: TooltipContentItem[] = [];
const items: TooltipContentItem[] = [];
for (let index = 1; index < series.length; index += 1) {
const s = series[index];
if (!s?.show) {
for (let seriesIndex = 1; seriesIndex < series.length; seriesIndex += 1) {
const seriesItem = series[seriesIndex];
if (!seriesItem?.show) {
continue;
}
const dataIndex = dataIndexes[index];
const dataIndex = dataIndexes[seriesIndex];
// Skip series with no data at the current cursor position
if (dataIndex === null) {
continue;
@@ -89,30 +88,22 @@ export function buildTooltipContent({
const baseValue = getTooltipBaseValue({
data,
index,
index: seriesIndex,
dataIndex,
isStackedBarChart,
series,
});
const isActive = index === activeSeriesIndex;
if (Number.isFinite(baseValue) && baseValue !== null) {
const item: TooltipContentItem = {
label: String(s.label ?? ''),
items.push({
label: String(seriesItem.label ?? ''),
value: baseValue,
tooltipValue: getToolTipValue(baseValue, yAxisUnit, decimalPrecision),
color: resolveSeriesColor(s.stroke, uPlotInstance, index),
isActive,
};
if (isActive) {
active.push(item);
} else {
rest.push(item);
}
color: resolveSeriesColor(seriesItem.stroke, uPlotInstance, seriesIndex),
isActive: seriesIndex === activeSeriesIndex,
});
}
}
return [...active, ...rest];
return items;
}
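
To make the hidden-series rule above concrete, a self-contained sketch with assumed numbers (the values are illustrative, not taken from this diff):

// valuesAtCursor holds each series' stacked value at one cursor index;
// index 0 is the x series, as in uPlot's AlignedData.
type SketchSeries = { show: boolean };

function sketchBaseValue(
  valuesAtCursor: number[],
  series: SketchSeries[],
  index: number,
): number {
  let base = valuesAtCursor[index];
  // Skip hidden series: they keep raw (unstacked) values, so subtracting
  // them would produce negative/wrong results.
  for (let j = index + 1; j < series.length; j++) {
    if (series[j].show) {
      base -= valuesAtCursor[j];
      break;
    }
  }
  return base;
}

// [x, A(visible), B(hidden), C(visible)] with stacked values [0, 30, 99, 10]:
// B is skipped, C is subtracted, so A's own contribution is 30 - 10 === 20.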

View File

@@ -36,8 +36,8 @@ const HOVER_DISMISS_DELAY_MS = 100;
export default function TooltipPlugin({
config,
render,
maxWidth = 300,
maxHeight = 400,
maxWidth = 450,
maxHeight = 600,
syncMode = DashboardCursorSync.None,
syncKey = '_tooltip_sync_global_',
pinnedTooltipElement,

View File

@@ -97,6 +97,9 @@ export default defineConfig(
javascriptEnabled: true,
},
},
modules: {
localsConvention: 'camelCaseOnly',
},
},
define: {
// TODO: Remove this in favor of import.meta.env
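
With localsConvention set to 'camelCaseOnly', the kebab-case selectors in the new .module.scss files are exposed only as camelCase keys, which is what the Tooltip components above import:

// How class names resolve under camelCaseOnly (assumes the bundler's
// CSS-module typings for the .module.scss import):
//   .uplot-tooltip-container -> Styles.uplotTooltipContainer
//   .uplot-tooltip-header    -> Styles.uplotTooltipHeader
import Styles from './Tooltip.module.scss';

const containerClass: string = Styles.uplotTooltipContainer;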

View File

@@ -10,6 +10,26 @@ import (
)
func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/credentials", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.GetConnectionCredentials),
handler.OpenAPIDef{
ID: "GetConnectionCredentials",
Tags: []string{"cloudintegration"},
Summary: "Get connection credentials",
Description: "This endpoint retrieves the connection credentials required for integration",
Request: nil,
RequestContentType: "application/json",
Response: new(citypes.SignozCredentials),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
},
)).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.CreateAccount),
handler.OpenAPIDef{
@@ -59,7 +79,7 @@ func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
Description: "This endpoint gets an account for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: new(citypes.GettableAccount),
Response: new(citypes.Account),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound},
@@ -139,7 +159,7 @@ func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
Description: "This endpoint gets a service for the specified cloud provider",
Request: nil,
RequestContentType: "",
Response: new(citypes.GettableService),
Response: new(citypes.Service),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
@@ -150,7 +170,7 @@ func (provider *provider) addCloudIntegrationRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/services/{service_id}", handler.New(
if err := router.Handle("/api/v1/cloud_integrations/{cloud_provider}/accounts/{id}/services/{service_id}", handler.New(
provider.authZ.AdminAccess(provider.cloudIntegrationHandler.UpdateService),
handler.OpenAPIDef{
ID: "UpdateService",

View File

@@ -1,33 +0,0 @@
package signozapiserver
import (
"net/http"
"github.com/SigNoz/signoz/pkg/http/handler"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/gorilla/mux"
)
func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
if err := router.Handle("/api/v2/infra-monitoring/hosts/list", handler.New(
provider.authZ.ViewAccess(provider.infraMonitoringHandler.HostsList),
handler.OpenAPIDef{
ID: "HostsList",
Tags: []string{"infra-monitoring"},
Summary: "List Hosts for Infra Monitoring",
Description: "This endpoint returns a list of hosts along with other information for each of them",
Request: new(inframonitoringtypes.HostsListRequest),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.HostsListResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/preference"
"github.com/SigNoz/signoz/pkg/modules/promote"
@@ -48,7 +47,6 @@ type provider struct {
dashboardModule dashboard.Module
dashboardHandler dashboard.Handler
metricsExplorerHandler metricsexplorer.Handler
infraMonitoringHandler inframonitoring.Handler
gatewayHandler gateway.Handler
fieldsHandler fields.Handler
authzHandler authz.Handler
@@ -75,7 +73,6 @@ func NewFactory(
dashboardModule dashboard.Module,
dashboardHandler dashboard.Handler,
metricsExplorerHandler metricsexplorer.Handler,
infraMonitoringHandler inframonitoring.Handler,
gatewayHandler gateway.Handler,
fieldsHandler fields.Handler,
authzHandler authz.Handler,
@@ -105,7 +102,6 @@ func NewFactory(
dashboardModule,
dashboardHandler,
metricsExplorerHandler,
infraMonitoringHandler,
gatewayHandler,
fieldsHandler,
authzHandler,
@@ -137,7 +133,6 @@ func newProvider(
dashboardModule dashboard.Module,
dashboardHandler dashboard.Handler,
metricsExplorerHandler metricsexplorer.Handler,
infraMonitoringHandler inframonitoring.Handler,
gatewayHandler gateway.Handler,
fieldsHandler fields.Handler,
authzHandler authz.Handler,
@@ -167,7 +162,6 @@ func newProvider(
dashboardModule: dashboardModule,
dashboardHandler: dashboardHandler,
metricsExplorerHandler: metricsExplorerHandler,
infraMonitoringHandler: infraMonitoringHandler,
gatewayHandler: gatewayHandler,
fieldsHandler: fieldsHandler,
authzHandler: authzHandler,
@@ -234,10 +228,6 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
return err
}
if err := provider.addInfraMonitoringRoutes(router); err != nil {
return err
}
if err := provider.addGatewayRoutes(router); err != nil {
return err
}

View File

@@ -10,19 +10,21 @@ import (
)
type Module interface {
GetConnectionCredentials(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType) (*citypes.SignozCredentials, error)
CreateAccount(ctx context.Context, account *citypes.Account) error
// GetAccount returns a cloud integration account.
GetAccount(ctx context.Context, orgID, accountID valuer.UUID) (*citypes.Account, error)
GetAccount(ctx context.Context, orgID, accountID valuer.UUID, provider citypes.CloudProviderType) (*citypes.Account, error)
// ListAccounts lists accounts where the agent is connected.
ListAccounts(ctx context.Context, orgID valuer.UUID) ([]*citypes.Account, error)
ListAccounts(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType) ([]*citypes.Account, error)
// UpdateAccount updates the cloud integration account for a specific organization.
UpdateAccount(ctx context.Context, account *citypes.Account) error
// DisconnectAccount soft deletes/removes a cloud integration account.
DisconnectAccount(ctx context.Context, orgID, accountID valuer.UUID) error
DisconnectAccount(ctx context.Context, orgID, accountID valuer.UUID, provider citypes.CloudProviderType) error
// GetConnectionArtifact returns cloud-provider-specific connection information;
// the client side decides how this information is shown
@@ -30,17 +32,20 @@ type Module interface {
// ListServicesMetadata returns the list of service metadata for a cloud provider, scoped to the given integrationID.
// This returns only a summary of each service, not the whole service definition
ListServicesMetadata(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID) ([]*citypes.ServiceMetadata, error)
ListServicesMetadata(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType, integrationID *valuer.UUID) ([]*citypes.ServiceMetadata, error)
// GetService returns service definition details for a serviceID. This returns the config and
// other details required to render the service details page in the web client.
GetService(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID, serviceID string) (*citypes.Service, error)
GetService(ctx context.Context, orgID valuer.UUID, integrationID *valuer.UUID, serviceID citypes.ServiceID, provider citypes.CloudProviderType) (*citypes.Service, error)
// CreateService creates a new service for a cloud integration account.
CreateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService, provider citypes.CloudProviderType) error
// UpdateService updates cloud integration service
UpdateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService) error
UpdateService(ctx context.Context, orgID valuer.UUID, service *citypes.CloudIntegrationService, provider citypes.CloudProviderType) error
// AgentCheckIn is called by the agent to heartbeat and fetch the latest config in response.
AgentCheckIn(ctx context.Context, orgID valuer.UUID, req *citypes.AgentCheckInRequest) (*citypes.AgentCheckInResponse, error)
AgentCheckIn(ctx context.Context, orgID valuer.UUID, provider citypes.CloudProviderType, req *citypes.AgentCheckInRequest) (*citypes.AgentCheckInResponse, error)
// GetDashboardByID returns dashboard JSON for a given dashboard id.
// This only returns the dashboard when the service (embedded in the dashboard id) is enabled
@@ -52,7 +57,22 @@ type Module interface {
ListDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
}
type CloudProviderModule interface {
GetConnectionArtifact(ctx context.Context, account *citypes.Account, req *citypes.ConnectionArtifactRequest) (*citypes.ConnectionArtifact, error)
// ListServiceDefinitions returns all service definitions for this cloud provider.
ListServiceDefinitions(ctx context.Context) ([]*citypes.ServiceDefinition, error)
// GetServiceDefinition returns the service definition for the given service ID.
GetServiceDefinition(ctx context.Context, serviceID citypes.ServiceID) (*citypes.ServiceDefinition, error)
// BuildIntegrationConfig compiles the provider-specific integration config from the account
// and list of configured services. This is the config returned to the agent on check-in.
BuildIntegrationConfig(ctx context.Context, account *citypes.Account, services []*citypes.StorableCloudIntegrationService) (*citypes.ProviderIntegrationConfig, error)
}
type Handler interface {
GetConnectionCredentials(http.ResponseWriter, *http.Request)
CreateAccount(http.ResponseWriter, *http.Request)
ListAccounts(http.ResponseWriter, *http.Request)
GetAccount(http.ResponseWriter, *http.Request)

View File

@@ -12,6 +12,10 @@ func NewHandler() cloudintegration.Handler {
return &handler{}
}
func (handler *handler) GetConnectionCredentials(http.ResponseWriter, *http.Request) {
panic("unimplemented")
}
func (handler *handler) CreateAccount(writer http.ResponseWriter, request *http.Request) {
// TODO implement me
panic("implement me")

View File

@@ -34,6 +34,25 @@ func (store *store) GetAccountByID(ctx context.Context, orgID, id valuer.UUID, p
return account, nil
}
func (store *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
account := new(cloudintegrationtypes.StorableCloudIntegration)
err := store.
store.
BunDBCtx(ctx).
NewSelect().
Model(account).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Where("account_id = ?", providerAccountID).
Where("last_agent_report IS NOT NULL").
Where("removed_at IS NULL").
Scan(ctx)
if err != nil {
return nil, store.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
}
return account, nil
}
func (store *store) ListConnectedAccounts(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType) ([]*cloudintegrationtypes.StorableCloudIntegration, error) {
var accounts []*cloudintegrationtypes.StorableCloudIntegration
err := store.
@@ -96,25 +115,6 @@ func (store *store) RemoveAccount(ctx context.Context, orgID, id valuer.UUID, pr
return err
}
func (store *store) GetConnectedAccount(ctx context.Context, orgID valuer.UUID, provider cloudintegrationtypes.CloudProviderType, providerAccountID string) (*cloudintegrationtypes.StorableCloudIntegration, error) {
account := new(cloudintegrationtypes.StorableCloudIntegration)
err := store.
store.
BunDBCtx(ctx).
NewSelect().
Model(account).
Where("org_id = ?", orgID).
Where("provider = ?", provider).
Where("account_id = ?", providerAccountID).
Where("last_agent_report IS NOT NULL").
Where("removed_at IS NULL").
Scan(ctx)
if err != nil {
return nil, store.store.WrapNotFoundErrf(err, cloudintegrationtypes.ErrCodeCloudIntegrationNotFound, "connected account with provider account id %s not found", providerAccountID)
}
return account, nil
}
func (store *store) GetServiceByServiceID(ctx context.Context, cloudIntegrationID valuer.UUID, serviceID cloudintegrationtypes.ServiceID) (*cloudintegrationtypes.StorableCloudIntegrationService, error) {
service := new(cloudintegrationtypes.StorableCloudIntegrationService)
err := store.
@@ -172,3 +172,9 @@ func (store *store) UpdateService(ctx context.Context, service *cloudintegration
Exec(ctx)
return err
}
func (store *store) RunInTx(ctx context.Context, cb func(ctx context.Context) error) error {
return store.store.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
return cb(ctx)
})
}

View File

@@ -1,33 +0,0 @@
package inframonitoring
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
)
type Config struct {
TelemetryStore TelemetryStoreConfig `mapstructure:"telemetrystore"`
}
type TelemetryStoreConfig struct {
Threads int `mapstructure:"threads"`
}
func NewConfigFactory() factory.ConfigFactory {
return factory.NewConfigFactory(factory.MustNewName("inframonitoring"), newConfig)
}
func newConfig() factory.Config {
return Config{
TelemetryStore: TelemetryStoreConfig{
Threads: 8,
},
}
}
func (c Config) Validate() error {
if c.TelemetryStore.Threads <= 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "inframonitoring.telemetrystore.threads must be positive, got %d", c.TelemetryStore.Threads)
}
return nil
}

View File

@@ -1,257 +0,0 @@
package implinframonitoring
import (
"context"
"fmt"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/huandu/go-sqlbuilder"
)
const (
ResponseTypeList = "list"
ResponseTypeGroupedList = "grouped_list"
)
func (m *module) buildFilterClause(ctx context.Context, filter *qbtypes.Filter, startMillis, endMillis int64) (*sqlbuilder.WhereClause, error) {
expression := ""
if filter != nil {
expression = strings.TrimSpace(filter.Expression)
}
if expression == "" {
return sqlbuilder.NewWhereClause(), nil
}
whereClauseSelectors := querybuilder.QueryStringToKeysSelectors(expression)
for idx := range whereClauseSelectors {
whereClauseSelectors[idx].Signal = telemetrytypes.SignalMetrics
whereClauseSelectors[idx].SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact
}
keys, _, err := m.telemetryMetadataStore.GetKeysMulti(ctx, whereClauseSelectors)
if err != nil {
return nil, err
}
opts := querybuilder.FilterExprVisitorOpts{
Context: ctx,
Logger: m.logger,
FieldMapper: m.fieldMapper,
ConditionBuilder: m.condBuilder,
FullTextColumn: &telemetrytypes.TelemetryFieldKey{Name: "metric_name", FieldContext: telemetrytypes.FieldContextMetric},
FieldKeys: keys,
StartNs: querybuilder.ToNanoSecs(uint64(startMillis)),
EndNs: querybuilder.ToNanoSecs(uint64(endMillis)),
}
whereClause, err := querybuilder.PrepareWhereClause(expression, opts)
if err != nil {
return nil, err
}
if whereClause == nil || whereClause.WhereClause == nil {
return sqlbuilder.NewWhereClause(), nil
}
return whereClause.WhereClause, nil
}
// getMetricsExistenceAndEarliestTime checks whether any of the given metric names
// have been reported, and returns the total count and the earliest first-reported timestamp.
// When count is 0, minFirstReportedUnixMilli is 0.
func (m *module) getMetricsExistenceAndEarliestTime(ctx context.Context, metricNames []string) (uint64, uint64, error) {
if len(metricNames) == 0 {
return 0, 0, nil
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("count(*) AS cnt", "min(first_reported_unix_milli) AS min_first_reported")
sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
sb.Where(sb.In("metric_name", sqlbuilder.List(metricNames)))
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
var count, minFirstReported uint64
err := m.telemetryStore.ClickhouseDB().QueryRow(ctx, query, args...).Scan(&count, &minFirstReported)
if err != nil {
return 0, 0, err
}
return count, minFirstReported, nil
}
// getMetadata fetches the latest values of additionalCols for each unique combination of groupBy keys,
// within the given time range and metric names. It uses argMax(tuple(...), unix_milli) to ensure
// we always pick attribute values from the latest timestamp for each group.
//
// The returned map has a composite key of groupBy column values joined by "\x00" (null byte),
// mapping to a flat map of col_name -> col_value (includes both groupBy and additional cols).
func (m *module) getMetadata(
ctx context.Context,
metricNames []string,
groupBy []qbtypes.GroupByKey,
additionalCols []string,
filter *qbtypes.Filter,
startMs, endMs int64,
) (map[string]map[string]string, error) {
if len(metricNames) == 0 {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "metricNames must not be empty")
}
if len(groupBy) == 0 {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "groupBy must not be empty")
}
// Pick the optimal timeseries table based on time range; also get adjusted start.
adjustedStart, adjustedEnd, distributedTableName, _ := telemetrymetrics.WhichTSTableToUse(
uint64(startMs), uint64(endMs), nil,
)
// Build a fingerprint subquery against the samples table using the original
// (non-adjusted) time range. The time_series tables are ReplacingMergeTrees
// with bucketed granularity, so WhichTSTableToUse widens the window — this
// subquery restricts to fingerprints actually active in the requested range.
samplesTableName := telemetrymetrics.WhichSamplesTableToUse(
uint64(startMs), uint64(endMs),
metrictypes.UnspecifiedType,
metrictypes.TimeAggregationUnspecified,
nil,
)
fpSB := sqlbuilder.NewSelectBuilder()
fpSB.Select("DISTINCT fingerprint")
fpSB.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, samplesTableName))
fpSB.Where(
fpSB.In("metric_name", sqlbuilder.List(metricNames)),
fpSB.GE("unix_milli", startMs),
fpSB.L("unix_milli", endMs),
)
// Flatten groupBy keys to string names for SQL expressions and result scanning.
groupByCols := make([]string, len(groupBy))
for i, key := range groupBy {
groupByCols[i] = key.Name
}
allCols := append(groupByCols, additionalCols...)
// --- Build inner query ---
// Inner SELECT columns: JSONExtractString for each groupBy col + argMax(tuple(...)) for additional cols
innerSelectCols := make([]string, 0, len(groupByCols)+1)
for _, col := range groupByCols {
innerSelectCols = append(innerSelectCols,
fmt.Sprintf("JSONExtractString(labels, '%s') AS `%s`", col, col),
)
}
// Build the argMax(tuple(...), unix_milli) expression for all additional cols
if len(additionalCols) > 0 {
tupleArgs := make([]string, 0, len(additionalCols))
for _, col := range additionalCols {
tupleArgs = append(tupleArgs, fmt.Sprintf("JSONExtractString(labels, '%s')", col))
}
innerSelectCols = append(innerSelectCols,
fmt.Sprintf("argMax(tuple(%s), unix_milli) AS latest_attrs", strings.Join(tupleArgs, ", ")),
)
}
innerSB := sqlbuilder.NewSelectBuilder()
innerSB.Select(innerSelectCols...)
innerSB.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, distributedTableName))
innerSB.Where(
innerSB.In("metric_name", sqlbuilder.List(metricNames)),
innerSB.GE("unix_milli", adjustedStart),
innerSB.L("unix_milli", adjustedEnd),
fmt.Sprintf("fingerprint GLOBAL IN (%s)", innerSB.Var(fpSB)), // TODO(nikhilmantri0902): check if this can be modified to be used with local table.
)
// Apply optional filter expression
if filter != nil && strings.TrimSpace(filter.Expression) != "" {
filterClause, err := m.buildFilterClause(ctx, filter, startMs, endMs)
if err != nil {
return nil, err
}
if filterClause != nil {
innerSB.AddWhereClause(sqlbuilder.CopyWhereClause(filterClause))
}
}
groupByAliases := make([]string, 0, len(groupByCols))
for _, col := range groupByCols {
groupByAliases = append(groupByAliases, fmt.Sprintf("`%s`", col))
}
innerSB.GroupBy(groupByAliases...)
innerQuery, innerArgs := innerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
// --- Build outer query ---
// Outer SELECT columns: groupBy cols directly + tupleElement(latest_attrs, N) for each additionalCol
outerSelectCols := make([]string, 0, len(allCols))
for _, col := range groupByCols {
outerSelectCols = append(outerSelectCols, fmt.Sprintf("`%s`", col))
}
for i, col := range additionalCols {
outerSelectCols = append(outerSelectCols,
fmt.Sprintf("tupleElement(latest_attrs, %d) AS `%s`", i+1, col),
)
}
outerSB := sqlbuilder.NewSelectBuilder()
outerSB.Select(outerSelectCols...)
outerSB.From(fmt.Sprintf("(%s)", innerQuery))
outerQuery, _ := outerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
// All ? params are in innerArgs; outer query introduces none of its own.
rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, outerQuery, innerArgs...)
if err != nil {
return nil, err
}
defer rows.Close()
result := make(map[string]map[string]string)
for rows.Next() {
row := make([]string, len(allCols))
scanPtrs := make([]any, len(row))
for i := range row {
scanPtrs[i] = &row[i]
}
if err := rows.Scan(scanPtrs...); err != nil {
return nil, err
}
compositeKey := compositeKeyFromList(row[:len(groupByCols)])
attrMap := make(map[string]string, len(allCols))
for i, col := range allCols {
attrMap[col] = row[i]
}
result[compositeKey] = attrMap
}
if err := rows.Err(); err != nil {
return nil, err
}
return result, nil
}
func (m *module) validateOrderBy(orderBy *qbtypes.OrderBy, orderByToQueryNamesMap map[string][]string) error {
if orderBy == nil {
return nil
}
if _, exists := orderByToQueryNamesMap[orderBy.Key.Name]; !exists {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", orderBy.Key.Name)
}
if orderBy.Direction != qbtypes.OrderDirectionAsc && orderBy.Direction != qbtypes.OrderDirectionDesc {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", orderBy.Direction)
}
return nil
}

View File

@@ -1,48 +0,0 @@
package implinframonitoring
import (
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/binding"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type handler struct {
module inframonitoring.Module
}
// NewHandler returns an inframonitoring.Handler implementation.
func NewHandler(m inframonitoring.Module) inframonitoring.Handler {
return &handler{
module: m,
}
}
func (h *handler) HostsList(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
orgID := valuer.MustNewUUID(claims.OrgID)
var parsedReq inframonitoringtypes.HostsListRequest
if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
render.Error(rw, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed to parse request body"))
return
}
result, err := h.module.HostsList(req.Context(), orgID, &parsedReq)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusOK, result)
}

View File

@@ -1,286 +0,0 @@
package implinframonitoring
import (
"fmt"
"sort"
"strings"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
type rankedGroup struct {
labels map[string]string
value float64
}
func isKeyInGroupByAttrs(groupByAttrs []qbtypes.GroupByKey, key string) bool {
for _, groupBy := range groupByAttrs {
if groupBy.Name == key {
return true
}
}
return false
}
func mergeFilterExpressions(queryFilterExpr, reqFilterExpr string) string {
queryFilterExpr = strings.TrimSpace(queryFilterExpr)
reqFilterExpr = strings.TrimSpace(reqFilterExpr)
if queryFilterExpr == "" {
return reqFilterExpr
}
if reqFilterExpr == "" {
return queryFilterExpr
}
return fmt.Sprintf("(%s) AND (%s)", queryFilterExpr, reqFilterExpr)
}
// compositeKeyFromList builds a composite key by joining the given parts
// with a null byte separator. This is the canonical way to construct
// composite keys for group identification across the infra monitoring module.
func compositeKeyFromList(parts []string) string {
return strings.Join(parts, "\x00")
}
// compositeKeyFromLabels builds a composite key from a label map by extracting
// the value for each groupBy key in order and joining them via compositeKeyFromList.
func compositeKeyFromLabels(labels map[string]string, groupBy []qbtypes.GroupByKey) string {
parts := make([]string, len(groupBy))
for i, key := range groupBy {
parts[i] = labels[key.Name]
}
return compositeKeyFromList(parts)
}
// parseAndSortGroups extracts group label maps from a ScalarData response and
// sorts them by the ranking query's aggregation value.
func parseAndSortGroups(
resp *qbtypes.QueryRangeResponse,
rankingQueryName string,
groupBy []qbtypes.GroupByKey,
direction qbtypes.OrderDirection,
) []rankedGroup {
if resp == nil || len(resp.Data.Results) == 0 {
return nil
}
// Find the ScalarData that contains the ranking column.
var sd *qbtypes.ScalarData
for _, r := range resp.Data.Results {
candidate, ok := r.(*qbtypes.ScalarData)
if !ok || candidate == nil {
continue
}
for _, col := range candidate.Columns {
if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName == rankingQueryName {
sd = candidate
break
}
}
if sd != nil {
break
}
}
if sd == nil || len(sd.Data) == 0 {
return nil
}
groupColIndices := make(map[string]int)
rankingColIdx := -1
for i, col := range sd.Columns {
if col.Type == qbtypes.ColumnTypeGroup {
groupColIndices[col.Name] = i
}
if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName == rankingQueryName {
rankingColIdx = i
}
}
if rankingColIdx == -1 {
return nil
}
groups := make([]rankedGroup, 0, len(sd.Data))
for _, row := range sd.Data {
labels := make(map[string]string, len(groupBy))
for _, key := range groupBy {
if idx, ok := groupColIndices[key.Name]; ok && idx < len(row) {
labels[key.Name] = fmt.Sprintf("%v", row[idx])
}
}
var value float64
if rankingColIdx < len(row) {
if v, ok := row[rankingColIdx].(float64); ok {
value = v
}
}
groups = append(groups, rankedGroup{labels: labels, value: value})
}
sort.Slice(groups, func(i, j int) bool {
if direction == qbtypes.OrderDirectionAsc {
return groups[i].value < groups[j].value
}
return groups[i].value > groups[j].value
})
return groups
}
// paginateWithBackfill returns the page of groups for [offset, offset+limit).
// The virtual sorted list is: metric-ranked groups first, then metadata-only
// groups (those in metadataMap but not in metric results) sorted alphabetically.
func paginateWithBackfill(
metricGroups []rankedGroup,
metadataMap map[string]map[string]string,
groupBy []qbtypes.GroupByKey,
offset, limit int,
) []map[string]string {
metricKeySet := make(map[string]bool, len(metricGroups))
for _, g := range metricGroups {
metricKeySet[compositeKeyFromLabels(g.labels, groupBy)] = true
}
metadataOnlyKeys := make([]string, 0)
for compositeKey := range metadataMap {
if !metricKeySet[compositeKey] {
metadataOnlyKeys = append(metadataOnlyKeys, compositeKey)
}
}
sort.Strings(metadataOnlyKeys)
totalMetric := len(metricGroups)
totalAll := totalMetric + len(metadataOnlyKeys)
end := offset + limit
if end > totalAll {
end = totalAll
}
if offset >= totalAll {
return nil
}
pageGroups := make([]map[string]string, 0, end-offset)
for i := offset; i < end; i++ {
if i < totalMetric {
pageGroups = append(pageGroups, metricGroups[i].labels)
} else {
compositeKey := metadataOnlyKeys[i-totalMetric]
attrs := metadataMap[compositeKey]
labels := make(map[string]string, len(groupBy))
for _, key := range groupBy {
labels[key.Name] = attrs[key.Name]
}
pageGroups = append(pageGroups, labels)
}
}
return pageGroups
}
// buildFullQueryRequest creates a QueryRangeRequest for all metrics,
// restricted to the given page of groups via an IN filter.
// Accepts primitive fields so it can be reused across different v2 APIs
// (hosts, pods, etc.).
func buildFullQueryRequest(
start int64,
end int64,
filterExpr string,
groupBy []qbtypes.GroupByKey,
pageGroups []map[string]string,
tableListQuery *qbtypes.QueryRangeRequest,
) *qbtypes.QueryRangeRequest {
groupValues := make(map[string][]string)
for _, labels := range pageGroups {
for k, v := range labels {
groupValues[k] = append(groupValues[k], v)
}
}
inClauses := make([]string, 0, len(groupValues))
for key, values := range groupValues {
quoted := make([]string, len(values))
for i, v := range values {
quoted[i] = fmt.Sprintf("'%s'", v)
}
inClauses = append(inClauses, fmt.Sprintf("%s IN (%s)", key, strings.Join(quoted, ", ")))
}
inFilterExpr := strings.Join(inClauses, " AND ")
fullReq := &qbtypes.QueryRangeRequest{
Start: uint64(start),
End: uint64(end),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: make([]qbtypes.QueryEnvelope, 0, len(tableListQuery.CompositeQuery.Queries)),
},
}
for _, envelope := range tableListQuery.CompositeQuery.Queries {
copied := envelope
if copied.Type == qbtypes.QueryTypeBuilder {
existingExpr := ""
if f := copied.GetFilter(); f != nil {
existingExpr = f.Expression
}
merged := mergeFilterExpressions(existingExpr, filterExpr)
merged = mergeFilterExpressions(merged, inFilterExpr)
copied.SetFilter(&qbtypes.Filter{Expression: merged})
copied.SetGroupBy(groupBy)
}
fullReq.CompositeQuery.Queries = append(fullReq.CompositeQuery.Queries, copied)
}
return fullReq
}
// parseFullQueryResponse extracts per-group metric values from the full
// composite query response. Returns compositeKey -> (queryName -> value).
// Each enabled query/formula produces its own ScalarData entry in Results,
// so we iterate over all of them and merge metrics per composite key.
func parseFullQueryResponse(
resp *qbtypes.QueryRangeResponse,
groupBy []qbtypes.GroupByKey,
) map[string]map[string]float64 {
result := make(map[string]map[string]float64)
if resp == nil || len(resp.Data.Results) == 0 {
return result
}
for _, r := range resp.Data.Results {
sd, ok := r.(*qbtypes.ScalarData)
if !ok || sd == nil {
continue
}
groupColIndices := make(map[string]int)
aggCols := make(map[int]string) // col index -> query name
for i, col := range sd.Columns {
if col.Type == qbtypes.ColumnTypeGroup {
groupColIndices[col.Name] = i
}
if col.Type == qbtypes.ColumnTypeAggregation {
aggCols[i] = col.QueryName
}
}
for _, row := range sd.Data {
labels := make(map[string]string, len(groupBy))
for _, key := range groupBy {
if idx, ok := groupColIndices[key.Name]; ok && idx < len(row) {
labels[key.Name] = fmt.Sprintf("%v", row[idx])
}
}
compositeKey := compositeKeyFromLabels(labels, groupBy)
if result[compositeKey] == nil {
result[compositeKey] = make(map[string]float64)
}
for idx, queryName := range aggCols {
if idx < len(row) {
if v, ok := row[idx].(float64); ok {
result[compositeKey][queryName] = v
}
}
}
}
}
return result
}

View File

@@ -1,283 +0,0 @@
package implinframonitoring
import (
"testing"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
func groupByKey(name string) qbtypes.GroupByKey {
return qbtypes.GroupByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: name},
}
}
func TestIsKeyInGroupByAttrs(t *testing.T) {
tests := []struct {
name string
groupByAttrs []qbtypes.GroupByKey
key string
expectedFound bool
}{
{
name: "key present in single-element list",
groupByAttrs: []qbtypes.GroupByKey{groupByKey("host.name")},
key: "host.name",
expectedFound: true,
},
{
name: "key present in multi-element list",
groupByAttrs: []qbtypes.GroupByKey{
groupByKey("host.name"),
groupByKey("os.type"),
groupByKey("k8s.cluster.name"),
},
key: "os.type",
expectedFound: true,
},
{
name: "key at last position",
groupByAttrs: []qbtypes.GroupByKey{
groupByKey("host.name"),
groupByKey("os.type"),
},
key: "os.type",
expectedFound: true,
},
{
name: "key not in list",
groupByAttrs: []qbtypes.GroupByKey{groupByKey("host.name")},
key: "os.type",
expectedFound: false,
},
{
name: "empty group by list",
groupByAttrs: []qbtypes.GroupByKey{},
key: "host.name",
expectedFound: false,
},
{
name: "nil group by list",
groupByAttrs: nil,
key: "host.name",
expectedFound: false,
},
{
name: "empty key string",
groupByAttrs: []qbtypes.GroupByKey{groupByKey("host.name")},
key: "",
expectedFound: false,
},
{
name: "empty key matches empty-named group by key",
groupByAttrs: []qbtypes.GroupByKey{groupByKey("")},
key: "",
expectedFound: true,
},
{
name: "partial match does not count",
groupByAttrs: []qbtypes.GroupByKey{
groupByKey("host"),
},
key: "host.name",
expectedFound: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := isKeyInGroupByAttrs(tt.groupByAttrs, tt.key)
if got != tt.expectedFound {
t.Errorf("isKeyInGroupByAttrs(%v, %q) = %v, want %v",
tt.groupByAttrs, tt.key, got, tt.expectedFound)
}
})
}
}
func TestMergeFilterExpressions(t *testing.T) {
tests := []struct {
name string
queryFilterExpr string
reqFilterExpr string
expected string
}{
{
name: "both non-empty",
queryFilterExpr: "cpu > 50",
reqFilterExpr: "host.name = 'web-1'",
expected: "(cpu > 50) AND (host.name = 'web-1')",
},
{
name: "query empty, req non-empty",
queryFilterExpr: "",
reqFilterExpr: "host.name = 'web-1'",
expected: "host.name = 'web-1'",
},
{
name: "query non-empty, req empty",
queryFilterExpr: "cpu > 50",
reqFilterExpr: "",
expected: "cpu > 50",
},
{
name: "both empty",
queryFilterExpr: "",
reqFilterExpr: "",
expected: "",
},
{
name: "whitespace-only query treated as empty",
queryFilterExpr: " ",
reqFilterExpr: "host.name = 'web-1'",
expected: "host.name = 'web-1'",
},
{
name: "whitespace-only req treated as empty",
queryFilterExpr: "cpu > 50",
reqFilterExpr: " ",
expected: "cpu > 50",
},
{
name: "both whitespace-only",
queryFilterExpr: " ",
reqFilterExpr: " ",
expected: "",
},
{
name: "leading/trailing whitespace trimmed before merge",
queryFilterExpr: " cpu > 50 ",
reqFilterExpr: " mem < 80 ",
expected: "(cpu > 50) AND (mem < 80)",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := mergeFilterExpressions(tt.queryFilterExpr, tt.reqFilterExpr)
if got != tt.expected {
t.Errorf("mergeFilterExpressions(%q, %q) = %q, want %q",
tt.queryFilterExpr, tt.reqFilterExpr, got, tt.expected)
}
})
}
}
func TestCompositeKeyFromList(t *testing.T) {
tests := []struct {
name string
parts []string
expected string
}{
{
name: "single part",
parts: []string{"web-1"},
expected: "web-1",
},
{
name: "multiple parts joined with null separator",
parts: []string{"web-1", "linux", "us-east"},
expected: "web-1\x00linux\x00us-east",
},
{
name: "empty slice returns empty string",
parts: []string{},
expected: "",
},
{
name: "nil slice returns empty string",
parts: nil,
expected: "",
},
{
name: "parts with empty strings",
parts: []string{"web-1", "", "us-east"},
expected: "web-1\x00\x00us-east",
},
{
name: "all empty strings",
parts: []string{"", ""},
expected: "\x00",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := compositeKeyFromList(tt.parts)
if got != tt.expected {
t.Errorf("compositeKeyFromList(%v) = %q, want %q",
tt.parts, got, tt.expected)
}
})
}
}
func TestCompositeKeyFromLabels(t *testing.T) {
tests := []struct {
name string
labels map[string]string
groupBy []qbtypes.GroupByKey
expected string
}{
{
name: "single group-by key",
labels: map[string]string{"host.name": "web-1"},
groupBy: []qbtypes.GroupByKey{groupByKey("host.name")},
expected: "web-1",
},
{
name: "multiple group-by keys joined with null separator",
labels: map[string]string{
"host.name": "web-1",
"os.type": "linux",
},
groupBy: []qbtypes.GroupByKey{groupByKey("host.name"), groupByKey("os.type")},
expected: "web-1\x00linux",
},
{
name: "missing label yields empty segment",
labels: map[string]string{"host.name": "web-1"},
groupBy: []qbtypes.GroupByKey{groupByKey("host.name"), groupByKey("os.type")},
expected: "web-1\x00",
},
{
name: "empty labels map",
labels: map[string]string{},
groupBy: []qbtypes.GroupByKey{groupByKey("host.name")},
expected: "",
},
{
name: "empty group-by slice",
labels: map[string]string{"host.name": "web-1"},
groupBy: []qbtypes.GroupByKey{},
expected: "",
},
{
name: "nil labels map",
labels: nil,
groupBy: []qbtypes.GroupByKey{groupByKey("host.name")},
expected: "",
},
{
name: "order matches group-by order, not map iteration order",
labels: map[string]string{
"z": "last",
"a": "first",
"m": "middle",
},
groupBy: []qbtypes.GroupByKey{groupByKey("a"), groupByKey("m"), groupByKey("z")},
expected: "first\x00middle\x00last",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := compositeKeyFromLabels(tt.labels, tt.groupBy)
if got != tt.expected {
t.Errorf("compositeKeyFromLabels(%v, %v) = %q, want %q",
tt.labels, tt.groupBy, got, tt.expected)
}
})
}
}

View File

@@ -1,492 +0,0 @@
package implinframonitoring
import (
"context"
"fmt"
"slices"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/huandu/go-sqlbuilder"
)
var (
hostNameAttrKey = "host.name"
)
// Helper group-by key used across all queries.
var hostNameGroupByKey = qbtypes.GroupByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: hostNameAttrKey,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
}
var hostsTableMetricNamesList = []string{
"system.cpu.time",
"system.memory.usage",
"system.cpu.load_average.15m",
"system.filesystem.usage",
}
var hostAttrKeysForMetadata = []string{
"os.type",
}
// orderByToHostsQueryNames maps the orderBy column to the query/formula names
// from HostsTableListQuery used for ranking host groups.
var orderByToHostsQueryNames = map[string][]string{
"cpu": {"A", "B", "F1"},
"memory": {"C", "D", "F2"},
"wait": {"E", "F", "F3"},
"disk_usage": {"H", "I", "F4"},
"load15": {"G"},
}
func (m *module) newHostsTableListQuery() *qbtypes.QueryRangeRequest {
return &qbtypes.QueryRangeRequest{
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{
// Query A: CPU usage logic (non-idle)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "A",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.cpu.time",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{
Expression: "state != 'idle'",
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Query B: CPU usage (all states)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "B",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.cpu.time",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Formula F1: CPU Usage (%)
{
Type: qbtypes.QueryTypeFormula,
Spec: qbtypes.QueryBuilderFormula{
Name: "F1",
Expression: "A/B",
Legend: "CPU Usage (%)",
Disabled: false,
},
},
// Query C: Memory usage (state = used)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "C",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.memory.usage",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{
Expression: "state = 'used'",
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Query D: Memory usage (all states)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "D",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.memory.usage",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Formula F2: Memory Usage (%)
{
Type: qbtypes.QueryTypeFormula,
Spec: qbtypes.QueryBuilderFormula{
Name: "F2",
Expression: "C/D",
Legend: "Memory Usage (%)",
Disabled: false,
},
},
// Query E: CPU Wait time (state = wait)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "E",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.cpu.time",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{
Expression: "state = 'wait'",
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Query F: CPU time (all states)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "F",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.cpu.time",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationRate,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Formula F3: CPU Wait Time (%)
{
Type: qbtypes.QueryTypeFormula,
Spec: qbtypes.QueryBuilderFormula{
Name: "F3",
Expression: "E/F",
Legend: "CPU Wait Time (%)",
Disabled: false,
},
},
// Query G: Load15
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "G",
Signal: telemetrytypes.SignalMetrics,
Legend: "CPU Load Average (15m)",
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.cpu.load_average.15m",
Temporality: metrictypes.Unspecified,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: false,
},
},
// Query H: Filesystem Usage (state = used)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "H",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.filesystem.usage",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{
Expression: "state = 'used'",
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Query I: Filesystem Usage (all states)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "I",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "system.filesystem.usage",
Temporality: metrictypes.Cumulative,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{hostNameGroupByKey},
Disabled: true,
},
},
// Formula F4: Disk Usage (%)
{
Type: qbtypes.QueryTypeFormula,
Spec: qbtypes.QueryBuilderFormula{
Name: "F4",
Expression: "H/I",
Legend: "Disk Usage (%)",
Disabled: false,
},
},
},
},
}
}
// getTopHostGroups runs a ranking query for the ordering metric, sorts the
// results, paginates, and backfills from metadataMap when the page extends
// past the metric-ranked groups.
func (m *module) getTopHostGroups(
ctx context.Context,
orgID valuer.UUID,
req *inframonitoringtypes.HostsListRequest,
metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
orderByKey := req.OrderBy.Key.Name
queryNamesForOrderBy := orderByToHostsQueryNames[orderByKey]
// The last entry is the formula/query whose value we sort by.
rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]
topReq := &qbtypes.QueryRangeRequest{
Start: uint64(req.Start),
End: uint64(req.End),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
},
}
for _, envelope := range m.newHostsTableListQuery().CompositeQuery.Queries {
if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
continue
}
copied := envelope
if copied.Type == qbtypes.QueryTypeBuilder {
existingExpr := ""
if f := copied.GetFilter(); f != nil {
existingExpr = f.Expression
}
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
copied.SetFilter(&qbtypes.Filter{Expression: merged})
copied.SetGroupBy(req.GroupBy)
}
topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
}
resp, err := m.querier.QueryRange(ctx, orgID, topReq)
if err != nil {
return nil, err
}
allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}
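// paginateWithBackfill is defined elsewhere in this package. A minimal sketch
// of the behavior described above, assuming the NUL-joined composite-key
// format exercised by the tests: metric-ranked groups come first, and groups
// known only from metadata are appended to fill out the page (the order of
// backfilled groups is left unspecified in this sketch).
func paginateWithBackfillSketch(
    ranked []map[string]string,
    metadataMap map[string]map[string]string,
    groupBy []qbtypes.GroupByKey,
    offset, limit int,
) []map[string]string {
    seen := make(map[string]bool, len(ranked))
    for _, labels := range ranked {
        seen[compositeKeyFromLabels(labels, groupBy)] = true
    }
    all := append([]map[string]string{}, ranked...)
    for key := range metadataMap {
        if seen[key] {
            continue
        }
        // Rebuild labels from the composite key: values joined in group-by order.
        values := strings.Split(key, "\x00")
        labels := make(map[string]string, len(groupBy))
        for i, gb := range groupBy {
            if i < len(values) {
                labels[gb.Name] = values[i]
            }
        }
        all = append(all, labels)
    }
    if offset >= len(all) {
        return nil
    }
    end := min(offset+limit, len(all))
    return all[offset:end]
}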
// applyHostsActiveStatusFilter modifies req.Filter.Expression to include an IN/NOT IN
// clause based on FilterByStatus and the set of active hosts.
// Returns true if the caller should short-circuit with an empty result (ACTIVE
// requested but no hosts are active).
func (m *module) applyHostsActiveStatusFilter(req *inframonitoringtypes.HostsListRequest, activeHostsMap map[string]bool) (shouldShortCircuit bool) {
if req.FilterByStatus != inframonitoringtypes.HostStatusActive && req.FilterByStatus != inframonitoringtypes.HostStatusInactive {
return false
}
activeHosts := make([]string, 0, len(activeHostsMap))
for host := range activeHostsMap {
activeHosts = append(activeHosts, fmt.Sprintf("'%s'", host))
}
if len(activeHosts) == 0 {
return req.FilterByStatus == inframonitoringtypes.HostStatusActive
}
op := "IN"
if req.FilterByStatus == inframonitoringtypes.HostStatusInactive {
op = "NOT IN"
}
if req.Filter == nil {
req.Filter = &qbtypes.Filter{}
}
statusClause := fmt.Sprintf("%s %s (%s)", hostNameAttrKey, op, strings.Join(activeHosts, ", "))
req.Filter.Expression = mergeFilterExpressions(req.Filter.Expression, statusClause)
return false
}
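// mergeFilterExpressions is defined elsewhere in this package; a minimal
// sketch of the behavior assumed throughout this file (AND two expressions,
// treating an empty side as absent):
func mergeFilterExpressionsSketch(a, b string) string {
    if a == "" {
        return b
    }
    if b == "" {
        return a
    }
    return fmt.Sprintf("(%s) AND (%s)", a, b)
}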
func (m *module) getHostsTableMetadata(ctx context.Context, req *inframonitoringtypes.HostsListRequest) (map[string]map[string]string, error) {
var nonGroupByAttrs []string
for _, key := range hostAttrKeysForMetadata {
if !isKeyInGroupByAttrs(req.GroupBy, key) {
nonGroupByAttrs = append(nonGroupByAttrs, key)
}
}
metadataMap, err := m.getMetadata(ctx, hostsTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
if err != nil {
return nil, err
}
return metadataMap, nil
}
// buildHostRecords constructs the final list of HostRecords for a page.
// Groups that had no metric data get default values of -1.
func (m *module) buildHostRecords(
resp *qbtypes.QueryRangeResponse,
pageGroups []map[string]string,
groupBy []qbtypes.GroupByKey,
metadataMap map[string]map[string]string,
activeHostsMap map[string]bool,
) []inframonitoringtypes.HostRecord {
metricsMap := parseFullQueryResponse(resp, groupBy)
records := make([]inframonitoringtypes.HostRecord, 0, len(pageGroups))
for _, labels := range pageGroups {
compositeKey := compositeKeyFromLabels(labels, groupBy)
hostName := labels[hostNameAttrKey]
var activeStatus string
if hostName != "" {
if activeHostsMap[hostName] {
activeStatus = inframonitoringtypes.HostStatusActive.StringValue()
} else {
activeStatus = inframonitoringtypes.HostStatusInactive.StringValue()
}
}
record := inframonitoringtypes.HostRecord{
HostName: hostName,
Status: activeStatus,
CPU: -1,
Memory: -1,
Wait: -1,
Load15: -1,
DiskUsage: -1,
Meta: map[string]interface{}{},
}
if metrics, ok := metricsMap[compositeKey]; ok {
if v, exists := metrics["F1"]; exists {
record.CPU = v
}
if v, exists := metrics["F2"]; exists {
record.Memory = v
}
if v, exists := metrics["F3"]; exists {
record.Wait = v
}
if v, exists := metrics["F4"]; exists {
record.DiskUsage = v
}
if v, exists := metrics["G"]; exists {
record.Load15 = v
}
}
if attrs, ok := metadataMap[compositeKey]; ok {
for k, v := range attrs {
record.Meta[k] = v
}
}
records = append(records, record)
}
return records
}
// getActiveHosts returns a set of host names that have reported metrics recently (since sinceUnixMilli).
// It queries distributed_metadata for hosts where last_reported_unix_milli >= sinceUnixMilli.
// TODO(nikhilmantri0902): This method does not account for custom group-bys. If the API request
// groups by a key other than host.name, this method's response is effectively useless, because
// with such a group-by we should show the count of active/inactive hosts per group.
// We should have a way to determine active groups based on the group-by keys in the request.
func (m *module) getActiveHosts(ctx context.Context, metricNames []string, hostNameAttr string) (map[string]bool, error) {
sinceUnixMilli := time.Now().Add(-10 * time.Minute).UTC().UnixMilli()
sb := sqlbuilder.NewSelectBuilder()
sb.Distinct()
sb.Select("attr_string_value")
sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
sb.Where(
sb.In("metric_name", sqlbuilder.List(metricNames)),
sb.E("attr_name", hostNameAttr),
sb.GE("last_reported_unix_milli", sinceUnixMilli),
)
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
activeHosts := make(map[string]bool)
for rows.Next() {
var hostName string
if err := rows.Scan(&hostName); err != nil {
return nil, err
}
if hostName != "" {
activeHosts[hostName] = true
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return activeHosts, nil
}

View File

@@ -1,147 +0,0 @@
package implinframonitoring
import (
"context"
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type module struct {
telemetryStore telemetrystore.TelemetryStore
telemetryMetadataStore telemetrytypes.MetadataStore
querier querier.Querier
fieldMapper qbtypes.FieldMapper
condBuilder qbtypes.ConditionBuilder
logger *slog.Logger
config inframonitoring.Config
}
// NewModule constructs the inframonitoring module with the provided dependencies.
func NewModule(
telemetryStore telemetrystore.TelemetryStore,
telemetryMetadataStore telemetrytypes.MetadataStore,
querier querier.Querier,
providerSettings factory.ProviderSettings,
cfg inframonitoring.Config,
) inframonitoring.Module {
fieldMapper := telemetrymetrics.NewFieldMapper()
condBuilder := telemetrymetrics.NewConditionBuilder(fieldMapper)
return &module{
telemetryStore: telemetryStore,
telemetryMetadataStore: telemetryMetadataStore,
querier: querier,
fieldMapper: fieldMapper,
condBuilder: condBuilder,
logger: providerSettings.Logger,
config: cfg,
}
}
func (m *module) HostsList(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.HostsListRequest) (*inframonitoringtypes.HostsListResponse, error) {
if err := req.Validate(); err != nil {
return nil, err
}
resp := &inframonitoringtypes.HostsListResponse{}
// default to cpu order by
if req.OrderBy == nil {
req.OrderBy = &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "cpu",
},
},
Direction: qbtypes.OrderDirectionDesc,
}
}
if err := m.validateOrderBy(req.OrderBy, orderByToHostsQueryNames); err != nil {
return nil, err
}
// default to host name group by
if len(req.GroupBy) == 0 {
req.GroupBy = []qbtypes.GroupByKey{hostNameGroupByKey}
resp.Type = ResponseTypeList
} else {
resp.Type = ResponseTypeGroupedList
}
// 1. Check if any host metrics exist and get earliest retention time.
// If no host metrics exist, return early — the UI shows the onboarding guide.
// 2. If metrics exist but req.End is before the earliest reported time, convey retention boundary.
count, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, hostsTableMetricNamesList)
if err != nil {
return nil, err
}
if count == 0 {
resp.SentAnyMetricsData = false
resp.Records = []inframonitoringtypes.HostRecord{}
resp.Total = 0
return resp, nil
}
resp.SentAnyMetricsData = true
if req.End < int64(minFirstReportedUnixMilli) {
resp.EndTimeBeforeRetention = true
resp.Records = []inframonitoringtypes.HostRecord{}
resp.Total = 0
return resp, nil
}
// Determine active hosts: those with metrics reported in the last 10 minutes.
activeHostsMap, err := m.getActiveHosts(ctx, hostsTableMetricNamesList, hostNameAttrKey)
if err != nil {
return nil, err
}
// This call mutates req.Filter by AND-ing in an active-hosts clause when req.FilterByStatus is set;
// a true return means no hosts can match, so we short-circuit with an empty result.
if m.applyHostsActiveStatusFilter(req, activeHostsMap) {
resp.Records = []inframonitoringtypes.HostRecord{}
resp.Total = 0
return resp, nil
}
metadataMap, err := m.getHostsTableMetadata(ctx, req)
if err != nil {
return nil, err
}
if metadataMap == nil {
metadataMap = make(map[string]map[string]string)
}
resp.Total = len(metadataMap)
pageGroups, err := m.getTopHostGroups(ctx, orgID, req, metadataMap)
if err != nil {
return nil, err
}
if len(pageGroups) == 0 {
resp.Records = []inframonitoringtypes.HostRecord{}
return resp, nil
}
hostsFilterExpr := ""
if req.Filter != nil {
hostsFilterExpr = req.Filter.Expression
}
fullQueryReq := buildFullQueryRequest(req.Start, req.End, hostsFilterExpr, req.GroupBy, pageGroups, m.newHostsTableListQuery())
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
if err != nil {
return nil, err
}
resp.Records = m.buildHostRecords(queryResp, pageGroups, req.GroupBy, metadataMap, activeHostsMap)
return resp, nil
}

View File

@@ -1,17 +0,0 @@
package inframonitoring
import (
"context"
"net/http"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type Handler interface {
HostsList(http.ResponseWriter, *http.Request)
}
type Module interface {
HostsList(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.HostsListRequest) (*inframonitoringtypes.HostsListResponse, error)
}

View File

@@ -32,7 +32,7 @@ func newConfig() factory.Config {
Domain: "signozserviceaccount.com",
},
Analytics: AnalyticsConfig{
Enabled: true,
Enabled: false,
},
}
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/identn"
"github.com/SigNoz/signoz/pkg/instrumentation"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
"github.com/SigNoz/signoz/pkg/modules/user"
@@ -113,9 +112,6 @@ type Config struct {
// MetricsExplorer config
MetricsExplorer metricsexplorer.Config `mapstructure:"metricsexplorer"`
// InfraMonitoring config
InfraMonitoring inframonitoring.Config `mapstructure:"inframonitoring"`
// Flagger config
Flagger flagger.Config `mapstructure:"flagger"`
@@ -153,7 +149,6 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
gateway.NewConfigFactory(),
tokenizer.NewConfigFactory(),
metricsexplorer.NewConfigFactory(),
inframonitoring.NewConfigFactory(),
flagger.NewConfigFactory(),
user.NewConfigFactory(),
identn.NewConfigFactory(),

View File

@@ -18,8 +18,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/fields/implfields"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring/implinframonitoring"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/quickfilter"
@@ -53,7 +51,6 @@ type Handlers struct {
SpanPercentile spanpercentile.Handler
Services services.Handler
MetricsExplorer metricsexplorer.Handler
InfraMonitoring inframonitoring.Handler
Global global.Handler
FlaggerHandler flagger.Handler
GatewayHandler gateway.Handler
@@ -90,7 +87,6 @@ func NewHandlers(
RawDataExport: implrawdataexport.NewHandler(modules.RawDataExport),
Services: implservices.NewHandler(modules.Services),
MetricsExplorer: implmetricsexplorer.NewHandler(modules.MetricsExplorer),
InfraMonitoring: implinframonitoring.NewHandler(modules.InfraMonitoring),
SpanPercentile: implspanpercentile.NewHandler(modules.SpanPercentile),
Global: signozglobal.NewHandler(global),
FlaggerHandler: flagger.NewHandler(flaggerService),

View File

@@ -13,8 +13,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/authdomain"
"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring/implinframonitoring"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/organization"
@@ -71,7 +69,6 @@ type Modules struct {
Services services.Module
SpanPercentile spanpercentile.Module
MetricsExplorer metricsexplorer.Module
InfraMonitoring inframonitoring.Module
Promote promote.Module
ServiceAccount serviceaccount.Module
RuleStateHistory rulestatehistory.Module
@@ -119,7 +116,6 @@ func NewModules(
SpanPercentile: implspanpercentile.NewModule(querier, providerSettings),
Services: implservices.NewModule(querier, telemetryStore),
MetricsExplorer: implmetricsexplorer.NewModule(telemetryStore, telemetryMetadataStore, cache, ruleStore, dashboard, providerSettings, config.MetricsExplorer),
InfraMonitoring: implinframonitoring.NewModule(telemetryStore, telemetryMetadataStore, querier, providerSettings, config.InfraMonitoring),
Promote: implpromote.NewModule(telemetryMetadataStore, telemetryStore),
ServiceAccount: implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), authz, cache, analytics, providerSettings, config.ServiceAccount),
RuleStateHistory: implrulestatehistory.NewModule(implrulestatehistory.NewStore(telemetryStore, telemetryMetadataStore, providerSettings.Logger)),

View File

@@ -21,7 +21,6 @@ import (
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/fields"
"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/preference"
"github.com/SigNoz/signoz/pkg/modules/promote"
@@ -60,7 +59,6 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
struct{ dashboard.Module }{},
struct{ dashboard.Handler }{},
struct{ metricsexplorer.Handler }{},
struct{ inframonitoring.Handler }{},
struct{ gateway.Handler }{},
struct{ fields.Handler }{},
struct{ authz.Handler }{},

View File

@@ -276,7 +276,6 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
modules.Dashboard,
handlers.Dashboard,
handlers.MetricsExplorer,
handlers.InfraMonitoring,
handlers.GatewayHandler,
handlers.Fields,
handlers.AuthzHandler,

View File

@@ -1,8 +1,10 @@
package cloudintegrationtypes
import (
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -33,8 +35,6 @@ type GettableAccounts struct {
Accounts []*Account `json:"accounts" required:"true" nullable:"false"`
}
type GettableAccount = Account
type UpdatableAccount struct {
Config *AccountConfig `json:"config" required:"true" nullable:"false"`
}
@@ -42,3 +42,169 @@ type UpdatableAccount struct {
type AWSAccountConfig struct {
Regions []string `json:"regions" required:"true" nullable:"false"`
}
func NewAccount(orgID valuer.UUID, provider CloudProviderType, config *AccountConfig) *Account {
return &Account{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
OrgID: orgID,
Provider: provider,
Config: config,
}
}
func NewAccountFromStorable(storableAccount *StorableCloudIntegration) (*Account, error) {
// config cannot be empty
if storableAccount.Config == "" {
return nil, errors.NewInternalf(errors.CodeInternal, "config is empty for account with id: %s", storableAccount.ID)
}
account := &Account{
Identifiable: storableAccount.Identifiable,
TimeAuditable: storableAccount.TimeAuditable,
ProviderAccountID: storableAccount.AccountID,
Provider: storableAccount.Provider,
RemovedAt: storableAccount.RemovedAt,
OrgID: storableAccount.OrgID,
Config: new(AccountConfig),
}
switch storableAccount.Provider {
case CloudProviderTypeAWS:
awsConfig := new(AWSAccountConfig)
err := json.Unmarshal([]byte(storableAccount.Config), awsConfig)
if err != nil {
return nil, err
}
account.Config.AWS = awsConfig
}
if storableAccount.LastAgentReport != nil {
account.AgentReport = &AgentReport{
TimestampMillis: storableAccount.LastAgentReport.TimestampMillis,
Data: storableAccount.LastAgentReport.Data,
}
}
return account, nil
}
func NewAccountsFromStorables(storableAccounts []*StorableCloudIntegration) ([]*Account, error) {
accounts := make([]*Account, 0, len(storableAccounts))
for _, storableAccount := range storableAccounts {
account, err := NewAccountFromStorable(storableAccount)
if err != nil {
return nil, err
}
accounts = append(accounts, account)
}
return accounts, nil
}
func (account *Account) Update(config *AccountConfig) error {
if account.RemovedAt != nil {
return errors.New(errors.TypeUnsupported, ErrCodeCloudIntegrationRemoved, "this operation is not supported for a removed cloud integration account")
}
account.Config = config
account.UpdatedAt = time.Now()
return nil
}
func (account *Account) IsRemoved() bool {
return account.RemovedAt != nil
}
func NewAccountConfigFromPostableArtifact(provider CloudProviderType, artifact *PostableConnectionArtifact) (*AccountConfig, error) {
switch provider {
case CloudProviderTypeAWS:
if artifact.Config.Aws == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "AWS artifact is nil")
}
return &AccountConfig{
AWS: &AWSAccountConfig{
Regions: artifact.Config.Aws.Regions,
},
}, nil
}
return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
func NewArtifactRequestFromPostableArtifact(provider CloudProviderType, artifact *PostableConnectionArtifact) (*ConnectionArtifactRequest, error) {
req := &ConnectionArtifactRequest{
Credentials: artifact.Credentials,
}
switch provider {
case CloudProviderTypeAWS:
if artifact.Config.Aws == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "AWS artifact is nil")
}
req.Config = &ConnectionArtifactRequestConfig{
Aws: &AWSConnectionArtifactRequest{
DeploymentRegion: artifact.Config.Aws.DeploymentRegion,
Regions: artifact.Config.Aws.Regions,
},
}
return req, nil
}
return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
func NewAgentReport(data map[string]any) *AgentReport {
return &AgentReport{
TimestampMillis: time.Now().UnixMilli(),
Data: data,
}
}
// ToJSON returns the JSON bytes for the provider's config.
// It is deliberately not named MarshalJSON(), since that would interfere with the default JSON marshalling of the AccountConfig struct.
// NOTE: this serializes the first non-nil provider config.
func (config *AccountConfig) ToJSON() ([]byte, error) {
if config.AWS != nil {
return json.Marshal(config.AWS)
}
return nil, errors.NewInternalf(errors.CodeInternal, "no provider account config found")
}
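// For illustration: had the method above been named MarshalJSON, encoding/json
// would invoke it for every AccountConfig value, including ones nested inside
// Account, silently replacing the default field-by-field encoding. A
// hypothetical sketch of the clash:
//
//	func (config *AccountConfig) MarshalJSON() ([]byte, error) {
//		return json.Marshal(config.AWS) // would also hijack json.Marshal(account)
//	}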
func (updatable *UpdatableAccount) Validate(provider CloudProviderType) error {
if updatable.Config == nil {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"config is required")
}
switch provider {
case CloudProviderTypeAWS:
if updatable.Config.AWS == nil {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"aws configuration is required")
}
if len(updatable.Config.AWS.Regions) == 0 {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"at least one region is required")
}
for _, region := range updatable.Config.AWS.Regions {
if _, ok := ValidAWSRegions[region]; !ok {
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidCloudRegion,
"invalid AWS region: %s", region)
}
}
default:
return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput,
"invalid cloud provider: %s", provider.StringValue())
}
return nil
}

View File

@@ -13,10 +13,16 @@ import (
)
var (
ErrCodeCloudIntegrationInvalidConfig = errors.MustNewCode("cloud_integration_invalid_config")
ErrCodeUnsupported = errors.MustNewCode("cloud_integration_unsupported")
ErrCodeCloudIntegrationNotFound = errors.MustNewCode("cloud_integration_not_found")
ErrCodeCloudIntegrationAlreadyExists = errors.MustNewCode("cloud_integration_already_exists")
ErrCodeCloudIntegrationAlreadyConnected = errors.MustNewCode("cloud_integration_already_connected")
ErrCodeCloudIntegrationRemoved = errors.MustNewCode("cloud_integration_removed")
ErrCodeInvalidInput = errors.MustNewCode("cloud_integration_invalid_input")
ErrCodeCloudIntegrationServiceNotFound = errors.MustNewCode("cloud_integration_service_not_found")
ErrCodeCloudIntegrationServiceAlreadyExists = errors.MustNewCode("cloud_integration_service_already_exists")
ErrCodeServiceDefinitionNotFound = errors.MustNewCode("service_definition_not_found")
)
// StorableCloudIntegration represents a cloud integration stored in the database.
@@ -52,6 +58,26 @@ type StorableCloudIntegrationService struct {
CloudIntegrationID valuer.UUID `bun:"cloud_integration_id,type:text"`
}
// The following service config types are used only internally to store service config in the DB; they keep snake_case JSON keys for backward compatibility.
type StorableServiceConfig struct {
AWS *StorableAWSServiceConfig
}
type StorableAWSServiceConfig struct {
Logs *StorableAWSLogsServiceConfig `json:"logs,omitempty"`
Metrics *StorableAWSMetricsServiceConfig `json:"metrics,omitempty"`
}
type StorableAWSLogsServiceConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // region -> list of buckets in that region
}
type StorableAWSMetricsServiceConfig struct {
Enabled bool `json:"enabled"`
}
// Scan scans value from DB.
func (r *StorableAgentReport) Scan(src any) error {
var data []byte
@@ -68,10 +94,6 @@ func (r *StorableAgentReport) Scan(src any) error {
// Value creates value to be stored in DB.
func (r *StorableAgentReport) Value() (driver.Value, error) {
if r == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
}
serialized, err := json.Marshal(r)
if err != nil {
return nil, errors.WrapInternalf(
@@ -81,3 +103,107 @@ func (r *StorableAgentReport) Value() (driver.Value, error) {
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytes
return string(serialized), nil
}
func NewStorableCloudIntegration(account *Account) (*StorableCloudIntegration, error) {
configBytes, err := account.Config.ToJSON()
if err != nil {
return nil, err
}
storableAccount := &StorableCloudIntegration{
Identifiable: account.Identifiable,
TimeAuditable: account.TimeAuditable,
Provider: account.Provider,
Config: string(configBytes),
AccountID: account.ProviderAccountID,
OrgID: account.OrgID,
RemovedAt: account.RemovedAt,
}
if account.AgentReport != nil {
storableAccount.LastAgentReport = &StorableAgentReport{
TimestampMillis: account.AgentReport.TimestampMillis,
Data: account.AgentReport.Data,
}
}
return storableAccount, nil
}
// NewStorableCloudIntegrationService creates a StorableCloudIntegrationService from a
// CloudIntegrationService and its serialized config JSON, carrying over its ID and timestamps.
func NewStorableCloudIntegrationService(svc *CloudIntegrationService, configJSON string) *StorableCloudIntegrationService {
return &StorableCloudIntegrationService{
Identifiable: svc.Identifiable,
TimeAuditable: svc.TimeAuditable,
Type: svc.Type,
Config: configJSON,
CloudIntegrationID: svc.CloudIntegrationID,
}
}
func (account *StorableCloudIntegration) Update(providerAccountID *string, agentReport *AgentReport) {
account.AccountID = providerAccountID
if agentReport != nil {
account.LastAgentReport = &StorableAgentReport{
TimestampMillis: agentReport.TimestampMillis,
Data: agentReport.Data,
}
}
}
// The following StorableServiceConfig-related helpers convert between the JSON string stored in the DB and the ServiceConfig domain struct.
func newStorableServiceConfig(provider CloudProviderType, serviceID ServiceID, serviceConfig *ServiceConfig, supportedSignals *SupportedSignals) *StorableServiceConfig {
switch provider {
case CloudProviderTypeAWS:
storableAWSServiceConfig := new(StorableAWSServiceConfig)
if supportedSignals.Logs {
storableAWSServiceConfig.Logs = &StorableAWSLogsServiceConfig{
Enabled: serviceConfig.AWS.Logs.Enabled,
}
if serviceID == AWSServiceS3Sync {
storableAWSServiceConfig.Logs.S3Buckets = serviceConfig.AWS.Logs.S3Buckets
}
}
if supportedSignals.Metrics {
storableAWSServiceConfig.Metrics = &StorableAWSMetricsServiceConfig{
Enabled: serviceConfig.AWS.Metrics.Enabled,
}
}
return &StorableServiceConfig{AWS: storableAWSServiceConfig}
default:
return nil
}
}
func newStorableServiceConfigFromJSON(provider CloudProviderType, jsonStr string) (*StorableServiceConfig, error) {
switch provider {
case CloudProviderTypeAWS:
awsConfig := new(StorableAWSServiceConfig)
err := json.Unmarshal([]byte(jsonStr), awsConfig)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't parse AWS service config JSON")
}
return &StorableServiceConfig{AWS: awsConfig}, nil
default:
return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
}
func (config *StorableServiceConfig) toJSON(provider CloudProviderType) ([]byte, error) {
switch provider {
case CloudProviderTypeAWS:
jsonBytes, err := json.Marshal(config.AWS)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't serialize AWS service config to JSON")
}
return jsonBytes, nil
default:
return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
}
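// For illustration, an enabled logs config for the S3 Sync service serializes
// to snake_case JSON of this shape (region and bucket names are placeholders):
//
//	{"logs":{"enabled":true,"s3_buckets":{"us-east-1":["example-bucket"]}}}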

View File

@@ -1,6 +1,8 @@
package cloudintegrationtypes
import (
"fmt"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
)
@@ -14,10 +16,14 @@ var (
CloudProviderTypeAzure = CloudProviderType{valuer.NewString("azure")}
// errors.
ErrCodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")
ErrCodeCloudProviderInvalidInput = errors.MustNewCode("cloud_integration_invalid_cloud_provider")
AWSIntegrationUserEmail = valuer.MustNewEmail("aws-integration@signoz.io")
AzureIntegrationUserEmail = valuer.MustNewEmail("azure-integration@signoz.io")
CloudFormationQuickCreateBaseURL = valuer.NewString("https://%s.console.aws.amazon.com/cloudformation/home")
AgentCloudFormationTemplateS3Path = valuer.NewString("https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json")
AgentCloudFormationBaseStackName = valuer.NewString("signoz-integration")
)
// CloudIntegrationUserEmails is the list of valid emails for Cloud One Click integrations.
@@ -39,3 +45,18 @@ func NewCloudProvider(provider string) (CloudProviderType, error) {
return CloudProviderType{}, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
}
}
func GetCloudProviderEmail(provider CloudProviderType) (valuer.Email, error) {
switch provider {
case CloudProviderTypeAWS:
return AWSIntegrationUserEmail, nil
case CloudProviderTypeAzure:
return AzureIntegrationUserEmail, nil
default:
return valuer.Email{}, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
}
func NewIngestionKeyName(provider CloudProviderType) string {
return fmt.Sprintf("%s-integration", provider.StringValue())
}

View File

@@ -3,12 +3,26 @@ package cloudintegrationtypes
import (
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
)
type SignozCredentials struct {
SigNozAPIURL string `json:"sigNozApiURL" required:"true"`
SigNozAPIKey string `json:"sigNozApiKey" required:"true"` // PAT
IngestionURL string `json:"ingestionUrl" required:"true"`
IngestionKey string `json:"ingestionKey" required:"true"`
}
type ConnectionArtifactRequest struct {
// required till new providers are added
Aws *AWSConnectionArtifactRequest `json:"aws" required:"true" nullable:"false"`
Config *ConnectionArtifactRequestConfig `json:"config" required:"true"`
Credentials *SignozCredentials `json:"credentials" required:"true"`
}
type ConnectionArtifactRequestConfig struct {
// The agent version is common to all providers, so it is kept at the top level of this struct.
AgentVersion string
Aws *AWSConnectionArtifactRequest `json:"aws" required:"true" nullable:"false"`
}
type AWSConnectionArtifactRequest struct {
@@ -33,8 +47,8 @@ type GettableAccountWithArtifact struct {
}
type AgentCheckInRequest struct {
ProviderAccountID string `json:"providerAccountId" required:"false"`
CloudIntegrationID string `json:"cloudIntegrationId" required:"false"`
ProviderAccountID string `json:"providerAccountId" required:"false"`
CloudIntegrationID valuer.UUID `json:"cloudIntegrationId" required:"false"`
Data map[string]any `json:"data" required:"true" nullable:"true"`
}
@@ -67,8 +81,8 @@ type GettableAgentCheckInResponse struct {
// IntegrationConfig is the older integration config struct, kept for backward compatibility;
// it will eventually be removed once agents are updated to use the new struct.
type IntegrationConfig struct {
EnabledRegions []string `json:"enabled_regions" required:"true" nullable:"false"` // backward compatible
Telemetry *AWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"` // backward compatible
EnabledRegions []string `json:"enabled_regions" required:"true" nullable:"false"` // backward compatible
Telemetry *OldAWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"` // backward compatible
}
type ProviderIntegrationConfig struct {
@@ -79,3 +93,110 @@ type AWSIntegrationConfig struct {
EnabledRegions []string `json:"enabledRegions" required:"true" nullable:"false"`
Telemetry *AWSCollectionStrategy `json:"telemetry" required:"true" nullable:"false"`
}
// NewGettableAgentCheckInResponse constructs a backward-compatible response from an AgentCheckInResponse.
// It populates the old snake_case fields (account_id, cloud_account_id, integration_config, removed_at)
// from the new camelCase fields so older agents continue to work unchanged.
// The provider parameter controls which provider-specific block is mapped into the legacy integration_config.
func NewGettableAgentCheckInResponse(provider CloudProviderType, resp *AgentCheckInResponse) *GettableAgentCheckInResponse {
gettable := &GettableAgentCheckInResponse{
AccountID: resp.CloudIntegrationID,
CloudAccountID: resp.ProviderAccountID,
OlderRemovedAt: resp.RemovedAt,
AgentCheckInResponse: *resp,
}
switch provider {
case CloudProviderTypeAWS:
gettable.OlderIntegrationConfig = awsOlderIntegrationConfig(resp.IntegrationConfig)
}
return gettable
}
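// For example, assuming the new keys mirror the request's camelCase names, a
// check-in response for an AWS account carries both key styles (values are
// placeholders):
//
//	{
//	  "account_id": "<integration-uuid>",  "cloudIntegrationId": "<integration-uuid>",
//	  "cloud_account_id": "123456789012",  "providerAccountId": "123456789012",
//	  "removed_at": null,                  "integration_config": { ... }
//	}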
// Validate checks that the connection artifact request carries complete SigNoz credentials
// and a valid provider-specific block with non-empty, valid regions and a valid deployment region.
func (req *ConnectionArtifactRequest) Validate(provider CloudProviderType) error {
if req.Config == nil || req.Credentials == nil {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"config and credentials are required")
}
if req.Credentials.SigNozAPIURL == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"sigNozApiURL can not be empty")
}
if req.Credentials.SigNozAPIKey == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"sigNozApiKey can not be empty")
}
if req.Credentials.IngestionURL == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"ingestionUrl can not be empty")
}
if req.Credentials.IngestionKey == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"ingestionKey can not be empty")
}
switch provider {
case CloudProviderTypeAWS:
if req.Config.Aws == nil {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"aws configuration is required")
}
return req.Config.Aws.Validate()
}
return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput,
"invalid cloud provider: %s", provider)
}
// Validate checks that the AWS connection artifact request has a valid deployment region
// and a non-empty list of valid regions.
func (req *AWSConnectionArtifactRequest) Validate() error {
if req.DeploymentRegion == "" {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"deploymentRegion is required")
}
if _, ok := ValidAWSRegions[req.DeploymentRegion]; !ok {
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidCloudRegion,
"invalid deployment region: %s", req.DeploymentRegion)
}
if len(req.Regions) == 0 {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"at least one region is required")
}
for _, region := range req.Regions {
if _, ok := ValidAWSRegions[region]; !ok {
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidCloudRegion,
"invalid AWS region: %s", region)
}
}
return nil
}
// Validate checks that the request uses either old fields (account_id, cloud_account_id) or
// new fields (cloudIntegrationId, providerAccountId), never a mix of both.
func (req *PostableAgentCheckInRequest) Validate() error {
hasOldFields := req.ID != "" || req.AccountID != ""
hasNewFields := !req.CloudIntegrationID.IsZero() || req.ProviderAccountID != ""
if hasOldFields && hasNewFields {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"request must use either old fields (account_id, cloud_account_id) or new fields (cloudIntegrationId, providerAccountId), not both")
}
if !hasOldFields && !hasNewFields {
return errors.New(errors.TypeInvalidInput, ErrCodeInvalidInput,
"request must provide either old fields (account_id, cloud_account_id) or new fields (cloudIntegrationId, providerAccountId)")
}
return nil
}
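// For example, this mix of old and new field styles is rejected:
//
//	{"account_id": "<uuid>", "cloudIntegrationId": "<uuid>", "data": {}}
//
// while either style alone passes, e.g. the new one (values are placeholders):
//
//	{"cloudIntegrationId": "<uuid>", "providerAccountId": "123456789012", "data": {}}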
func (config *ConnectionArtifactRequestConfig) AddAgentVersion(agentVersion string) {
config.AgentVersion = agentVersion
}

View File

@@ -4,10 +4,7 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
)
var (
ErrCodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
ErrCodeMismatchCloudProvider = errors.MustNewCode("cloud_provider_mismatch")
)
var ErrCodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
// List of all valid cloud regions on Amazon Web Services.
var ValidAWSRegions = map[string]struct{}{

View File

@@ -2,6 +2,7 @@ package cloudintegrationtypes
import (
"fmt"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
@@ -45,13 +46,13 @@ type GettableServicesMetadata struct {
Services []*ServiceMetadata `json:"services" required:"true" nullable:"false"`
}
// Service represents a cloud integration service with its definition.
// CloudIntegrationService is non-nil only when a service entry exists in the DB with any config (enabled or disabled).
type Service struct {
ServiceDefinition
ServiceConfig *ServiceConfig `json:"serviceConfig" required:"false" nullable:"false"`
CloudIntegrationService *CloudIntegrationService `json:"cloudIntegrationService" required:"true" nullable:"true"`
}
type GettableService = Service
type UpdatableService struct {
Config *ServiceConfig `json:"config" required:"true" nullable:"false"`
}
@@ -60,7 +61,7 @@ type ServiceDefinition struct {
ServiceDefinitionMetadata
Overview string `json:"overview" required:"true"` // markdown
Assets Assets `json:"assets" required:"true"`
SupportedSignals SupportedSignals `json:"supported_signals" required:"true"`
SupportedSignals SupportedSignals `json:"supportedSignals" required:"true"`
DataCollected DataCollected `json:"dataCollected" required:"true"`
Strategy *CollectionStrategy `json:"telemetryCollectionStrategy" required:"true" nullable:"false"`
}
@@ -92,7 +93,7 @@ type AWSServiceConfig struct {
// NOTE: the JSON keys are snake case for backward compatibility with existing agents.
type AWSServiceLogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
S3Buckets map[string][]string `json:"s3Buckets,omitempty"`
}
type AWSServiceMetricsConfig struct {
@@ -121,19 +122,38 @@ type CollectedMetric struct {
}
// AWSCollectionStrategy represents signal collection strategy for AWS services.
// this is AWS specific.
// NOTE: this structure is still using snake case, for backward compatibility,
// with existing agents.
type AWSCollectionStrategy struct {
Metrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
Logs *AWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
Metrics *AWSMetricsStrategy `json:"metrics,omitempty"`
Logs *AWSLogsStrategy `json:"logs,omitempty"`
S3Buckets map[string][]string `json:"s3Buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}
// OldAWSCollectionStrategy is the backward-compatible snake_case form of AWSCollectionStrategy,
// used in the legacy integration_config response field for older agents.
type OldAWSCollectionStrategy struct {
Provider string `json:"provider"`
Metrics *OldAWSMetricsStrategy `json:"aws_metrics,omitempty"`
Logs *OldAWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
// OldAWSMetricsStrategy is the snake_case form of AWSMetricsStrategy for older agents.
type OldAWSMetricsStrategy struct {
StreamFilters []struct {
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
}
// OldAWSLogsStrategy is the snake_case form of AWSLogsStrategy for older agents.
type OldAWSLogsStrategy struct {
Subscriptions []struct {
LogGroupNamePrefix string `json:"log_group_name_prefix"`
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
}
// AWSMetricsStrategy represents metrics collection strategy for AWS services.
// this is AWS specific.
// NOTE: this structure is still using snake case, for backward compatibility,
// with existing agents.
type AWSMetricsStrategy struct {
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
StreamFilters []struct {
@@ -141,23 +161,20 @@ type AWSMetricsStrategy struct {
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
} `json:"cloudwatchMetricStreamFilters"`
}
// AWSLogsStrategy represents logs collection strategy for AWS services.
// this is AWS specific.
// NOTE: this structure is still using snake case, for backward compatibility,
// with existing agents.
type AWSLogsStrategy struct {
Subscriptions []struct {
// subscribe to all logs groups with specified prefix.
// eg: `/aws/rds/`
LogGroupNamePrefix string `json:"log_group_name_prefix"`
LogGroupNamePrefix string `json:"logGroupNamePrefix"`
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
// "" implies no filtering is required.
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
FilterPattern string `json:"filterPattern"`
} `json:"cloudwatchLogsSubscriptions"`
}
// Dashboard represents a dashboard definition for cloud integration.
@@ -170,12 +187,156 @@ type Dashboard struct {
Definition dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
// UTILS
func NewCloudIntegrationService(serviceID ServiceID, cloudIntegrationID valuer.UUID, config *ServiceConfig) *CloudIntegrationService {
return &CloudIntegrationService{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Type: serviceID,
Config: config,
CloudIntegrationID: cloudIntegrationID,
}
}
func NewCloudIntegrationServiceFromStorable(stored *StorableCloudIntegrationService, config *ServiceConfig) *CloudIntegrationService {
return &CloudIntegrationService{
Identifiable: stored.Identifiable,
TimeAuditable: stored.TimeAuditable,
Type: stored.Type,
Config: config,
CloudIntegrationID: stored.CloudIntegrationID,
}
}
func NewServiceMetadata(definition ServiceDefinition, enabled bool) *ServiceMetadata {
return &ServiceMetadata{
ServiceDefinitionMetadata: definition.ServiceDefinitionMetadata,
Enabled: enabled,
}
}
func NewService(def ServiceDefinition, storableService *CloudIntegrationService) *Service {
return &Service{
ServiceDefinition: def,
CloudIntegrationService: storableService,
}
}
func NewServiceConfigFromJSON(provider CloudProviderType, jsonString string) (*ServiceConfig, error) {
storableServiceConfig, err := newStorableServiceConfigFromJSON(provider, jsonString)
if err != nil {
return nil, err
}
switch provider {
case CloudProviderTypeAWS:
awsServiceConfig := new(AWSServiceConfig)
if storableServiceConfig.AWS.Logs != nil {
awsServiceConfig.Logs = &AWSServiceLogsConfig{
Enabled: storableServiceConfig.AWS.Logs.Enabled,
S3Buckets: storableServiceConfig.AWS.Logs.S3Buckets,
}
}
if storableServiceConfig.AWS.Metrics != nil {
awsServiceConfig.Metrics = &AWSServiceMetricsConfig{
Enabled: storableServiceConfig.AWS.Metrics.Enabled,
}
}
return &ServiceConfig{AWS: awsServiceConfig}, nil
default:
return nil, errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
}
// Update sets the service config.
func (service *CloudIntegrationService) Update(config *ServiceConfig) {
service.Config = config
service.UpdatedAt = time.Now()
}
// IsServiceEnabled returns true if the service has at least one signal (logs or metrics) enabled
// for the given cloud provider.
func IsServiceEnabled(provider CloudProviderType, config *ServiceConfig) bool {
switch provider {
case CloudProviderTypeAWS:
logsEnabled := config.AWS.Logs != nil && config.AWS.Logs.Enabled
metricsEnabled := config.AWS.Metrics != nil && config.AWS.Metrics.Enabled
return logsEnabled || metricsEnabled
default:
return false
}
}
// IsMetricsEnabled returns true if metrics are explicitly enabled for the given cloud provider.
// Used to gate dashboard availability — dashboards are only shown when metrics are enabled.
func IsMetricsEnabled(provider CloudProviderType, config *ServiceConfig) bool {
switch provider {
case CloudProviderTypeAWS:
return config.AWS.Metrics != nil && config.AWS.Metrics.Enabled
default:
return false
}
}
// IsLogsEnabled returns true if logs are explicitly enabled for the given cloud provider.
func IsLogsEnabled(provider CloudProviderType, config *ServiceConfig) bool {
switch provider {
case CloudProviderTypeAWS:
return config.AWS.Logs != nil && config.AWS.Logs.Enabled
default:
return false
}
}
func (serviceConfig *ServiceConfig) ToJSON(provider CloudProviderType, serviceID ServiceID, supportedSignals *SupportedSignals) ([]byte, error) {
storableServiceConfig := newStorableServiceConfig(provider, serviceID, serviceConfig, supportedSignals)
return storableServiceConfig.toJSON(provider)
}
func (updatableService *UpdatableService) Validate(provider CloudProviderType, serviceID ServiceID) error {
switch provider {
case CloudProviderTypeAWS:
if updatableService.Config.AWS == nil {
return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "AWS config is required for AWS service")
}
if serviceID == AWSServiceS3Sync {
if updatableService.Config.AWS.Logs == nil || updatableService.Config.AWS.Logs.S3Buckets == nil {
return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "AWS S3 Sync service requires S3 bucket configuration for logs")
}
}
return nil
default:
return errors.NewInvalidInputf(ErrCodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider.StringValue())
}
}
// GetCloudIntegrationDashboardID returns the dashboard id for a cloud integration, given the cloud provider, service id, and dashboard id.
// This is used to generate unique dashboard ids for cloud integration, and also to parse the dashboard id to get the cloud provider and service id when needed.
func GetCloudIntegrationDashboardID(cloudProvider CloudProviderType, svcID, dashboardID string) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcID, dashboardID)
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider.StringValue(), svcID, dashboardID)
}
// ParseCloudIntegrationDashboardID parses a dashboard id generated by GetCloudIntegrationDashboardID
// into its constituent parts (cloudProvider, serviceID, dashboardID).
func ParseCloudIntegrationDashboardID(id string) (CloudProviderType, string, string, error) {
parts := strings.SplitN(id, "--", 4)
if len(parts) != 4 || parts[0] != "cloud-integration" {
return CloudProviderType{}, "", "", errors.New(errors.TypeNotFound, ErrCodeCloudIntegrationNotFound, "invalid cloud integration dashboard id")
}
provider, err := NewCloudProvider(parts[1])
if err != nil {
return CloudProviderType{}, "", "", err
}
return provider, parts[2], parts[3], nil
}
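// For example, the two functions round-trip (service and dashboard ids are
// placeholders):
//
//	id := GetCloudIntegrationDashboardID(CloudProviderTypeAWS, "rds", "overview")
//	// id == "cloud-integration--aws--rds--overview"
//	provider, svcID, dashID, _ := ParseCloudIntegrationDashboardID(id)
//	// provider == CloudProviderTypeAWS, svcID == "rds", dashID == "overview"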
// GetDashboardsFromAssets returns the list of dashboards for the cloud provider service from definition.
@@ -189,9 +350,9 @@ func GetDashboardsFromAssets(
dashboards := make([]*dashboardtypes.Dashboard, 0)
for _, d := range assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
author := fmt.Sprintf("%s-integration", cloudProvider.StringValue())
dashboards = append(dashboards, &dashboardtypes.Dashboard{
ID: GetCloudIntegrationDashboardID(cloudProvider, svcID, d.ID),
ID: d.ID,
Locked: true,
OrgID: orgID,
Data: d.Definition,
@@ -208,3 +369,53 @@ func GetDashboardsFromAssets(
return dashboards
}
// awsOlderIntegrationConfig converts a ProviderIntegrationConfig into the legacy snake_case
// IntegrationConfig format consumed by older AWS agents. Returns nil if AWS config is absent.
func awsOlderIntegrationConfig(cfg *ProviderIntegrationConfig) *IntegrationConfig {
if cfg == nil || cfg.AWS == nil {
return nil
}
awsCfg := cfg.AWS
older := &IntegrationConfig{
EnabledRegions: awsCfg.EnabledRegions,
}
if awsCfg.Telemetry == nil {
return older
}
// Older agents expect a "provider" field and fully snake_case keys inside telemetry.
oldTelemetry := &OldAWSCollectionStrategy{
Provider: CloudProviderTypeAWS.StringValue(),
S3Buckets: awsCfg.Telemetry.S3Buckets,
}
if awsCfg.Telemetry.Metrics != nil {
// Convert camelCase cloudwatchMetricStreamFilters → snake_case cloudwatch_metric_stream_filters
oldMetrics := &OldAWSMetricsStrategy{}
for _, f := range awsCfg.Telemetry.Metrics.StreamFilters {
oldMetrics.StreamFilters = append(oldMetrics.StreamFilters, struct {
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
}{Namespace: f.Namespace, MetricNames: f.MetricNames})
}
oldTelemetry.Metrics = oldMetrics
}
if awsCfg.Telemetry.Logs != nil {
// Convert camelCase cloudwatchLogsSubscriptions → snake_case cloudwatch_logs_subscriptions
oldLogs := &OldAWSLogsStrategy{}
for _, s := range awsCfg.Telemetry.Logs.Subscriptions {
oldLogs.Subscriptions = append(oldLogs.Subscriptions, struct {
LogGroupNamePrefix string `json:"log_group_name_prefix"`
FilterPattern string `json:"filter_pattern"`
}{LogGroupNamePrefix: s.LogGroupNamePrefix, FilterPattern: s.FilterPattern})
}
oldTelemetry.Logs = oldLogs
}
older.Telemetry = oldTelemetry
return older
}
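// For example, a camelCase telemetry block such as
//
//	{"metrics":{"cloudwatchMetricStreamFilters":[{"Namespace":"AWS/RDS"}]}}
//
// is mapped to the legacy snake_case form consumed by older agents
// ("AWS/RDS" is a placeholder namespace):
//
//	{"provider":"aws","aws_metrics":{"cloudwatch_metric_stream_filters":[{"Namespace":"AWS/RDS"}]}}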

View File

@@ -38,6 +38,8 @@ type Store interface {
// UpdateService updates an existing cloud integration service
UpdateService(ctx context.Context, service *StorableCloudIntegrationService) error
RunInTx(context.Context, func(ctx context.Context) error) error
}
type ServiceDefinitionStore interface {

View File

@@ -1,114 +0,0 @@
package inframonitoringtypes
import (
"encoding/json"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
)
type HostStatus struct {
valuer.String
}
var (
HostStatusActive = HostStatus{valuer.NewString("active")}
HostStatusInactive = HostStatus{valuer.NewString("inactive")}
HostStatusNone = HostStatus{valuer.NewString("")}
)
func (HostStatus) Enum() []any {
return []any{
HostStatusActive,
HostStatusInactive,
HostStatusNone,
}
}
type HostsListRequest struct {
Start int64 `json:"start"`
End int64 `json:"end"`
Filter *qbtypes.Filter `json:"filter"`
FilterByStatus HostStatus `json:"filterByStatus"`
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
OrderBy *qbtypes.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
// Validate ensures HostsListRequest contains acceptable values.
func (req *HostsListRequest) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
}
if req.Start <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid start time %d: start must be greater than 0",
req.Start,
)
}
if req.End <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid end time %d: end must be greater than 0",
req.End,
)
}
if req.Start >= req.End {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time range: start (%d) must be less than end (%d)",
req.Start,
req.End,
)
}
if req.Limit < 1 || req.Limit > 5000 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
}
if req.Offset < 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
}
if !req.FilterByStatus.IsZero() && req.FilterByStatus != HostStatusActive && req.FilterByStatus != HostStatusInactive {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid filter by status: %s", req.FilterByStatus)
}
return nil
}
// UnmarshalJSON validates input immediately after decoding.
func (req *HostsListRequest) UnmarshalJSON(data []byte) error {
type raw HostsListRequest
var decoded raw
if err := json.Unmarshal(data, &decoded); err != nil {
return err
}
*req = HostsListRequest(decoded)
return req.Validate()
}
type HostsListResponse struct {
Type string `json:"type"` // TODO(nikhilmantri0902): should this also be changed to valuer.string?
Records []HostRecord `json:"records"`
Total int `json:"total"`
SentAnyMetricsData bool `json:"sentAnyMetricsData"`
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention"`
}
type HostRecord struct {
HostName string `json:"hostName"`
Status string `json:"status"`
CPU float64 `json:"cpu"`
Memory float64 `json:"memory"`
Wait float64 `json:"wait"`
Load15 float64 `json:"load15"`
DiskUsage float64 `json:"diskUsage"`
Meta map[string]interface{} `json:"meta"`
}

View File

@@ -1,178 +0,0 @@
package inframonitoringtypes
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)
func TestHostsListRequest_Validate(t *testing.T) {
tests := []struct {
name string
req *HostsListRequest
wantErr bool
}{
{
name: "valid request",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "nil request",
req: nil,
wantErr: true,
},
{
name: "start time zero",
req: &HostsListRequest{
Start: 0,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time negative",
req: &HostsListRequest{
Start: -1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "end time zero",
req: &HostsListRequest{
Start: 1000,
End: 0,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time greater than end time",
req: &HostsListRequest{
Start: 2000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time equal to end time",
req: &HostsListRequest{
Start: 1000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "limit zero",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 0,
Offset: 0,
},
wantErr: true,
},
{
name: "limit negative",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: -10,
Offset: 0,
},
wantErr: true,
},
{
name: "limit exceeds max",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 5001,
Offset: 0,
},
wantErr: true,
},
{
name: "offset negative",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: -5,
},
wantErr: true,
},
{
name: "filter by status ACTIVE",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
FilterByStatus: HostStatusActive,
},
wantErr: false,
},
{
name: "filter by status INACTIVE",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
FilterByStatus: HostStatusInactive,
},
wantErr: false,
},
{
name: "filter by status empty (zero value)",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "filter by status invalid value",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
FilterByStatus: HostStatus{valuer.NewString("UNKNOWN")},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.req.Validate()
if tt.wantErr {
require.Error(t, err)
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
} else {
require.NoError(t, err)
}
})
}
}

View File

@@ -20,6 +20,7 @@ var (
ErrCodeAPIKeyAlreadyExists = errors.MustNewCode("api_key_already_exists")
ErrCodeAPIKeytNotFound = errors.MustNewCode("api_key_not_found")
ErrCodeAPIKeyExpired = errors.MustNewCode("api_key_expired")
+ errInvalidAPIKeyName = errors.New(errors.TypeInvalidInput, ErrCodeAPIKeyInvalidInput, "name must be at most 180 characters long and contain only lowercase letters (a-z) and hyphens (-)")
)

type FactorAPIKey struct {
@@ -112,7 +113,7 @@ func (key *PostableFactorAPIKey) UnmarshalJSON(data []byte) error {
}

if match := factorAPIKeyNameRegex.MatchString(temp.Name); !match {
- return errors.Newf(errors.TypeInvalidInput, ErrCodeAPIKeyInvalidInput, "name must conform to the regex: %s", factorAPIKeyNameRegex.String())
+ return errInvalidAPIKeyName
}

if temp.ExpiresAt != 0 && time.Now().After(time.Unix(int64(temp.ExpiresAt), 0)) {
@@ -132,7 +133,7 @@ func (key *UpdatableFactorAPIKey) UnmarshalJSON(data []byte) error {
}

if match := factorAPIKeyNameRegex.MatchString(temp.Name); !match {
- return errors.Newf(errors.TypeInvalidInput, ErrCodeAPIKeyInvalidInput, "name must conform to the regex: %s", factorAPIKeyNameRegex.String())
+ return errInvalidAPIKeyName
}

if temp.ExpiresAt != 0 && time.Now().After(time.Unix(int64(temp.ExpiresAt), 0)) {
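
Both hunks in this file make the same swap: instead of building a fresh errors.Newf value (with the raw regex interpolated into the message) on every failure, the function returns one package-level sentinel. A sketch of the pattern using the signatures visible in this diff; validateName is a hypothetical helper, not part of the change:

// Allocated once at package init; every failure returns the same value, so
// callers can compare against it and the message no longer leaks the regex.
var errInvalidAPIKeyName = errors.New(
	errors.TypeInvalidInput,
	ErrCodeAPIKeyInvalidInput,
	"name must be at most 180 characters long and contain only lowercase letters (a-z) and hyphens (-)",
)

func validateName(name string) error {
	if !factorAPIKeyNameRegex.MatchString(name) {
		return errInvalidAPIKeyName
	}
	return nil
}

The service account file below applies the identical pattern with its own sentinel.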

View File

@@ -23,6 +23,7 @@ var (
ErrCodeServiceAccountNotFound = errors.MustNewCode("service_account_not_found")
ErrCodeServiceAccountRoleAlreadyExists = errors.MustNewCode("service_account_role_already_exists")
ErrCodeServiceAccountOperationUnsupported = errors.MustNewCode("service_account_operation_unsupported")
+ errInvalidServiceAccountName = errors.New(errors.TypeInvalidInput, ErrCodeServiceAccountInvalidInput, "name must be at most 150 characters long and contain only lowercase letters (a-z) and hyphens (-)")
)

var (
@@ -214,7 +215,7 @@ func (serviceAccount *PostableServiceAccount) UnmarshalJSON(data []byte) error {
}

if match := serviceAccountNameRegex.MatchString(temp.Name); !match {
- return errors.Newf(errors.TypeInvalidInput, ErrCodeServiceAccountInvalidInput, "name must conform to the regex: %s", serviceAccountNameRegex.String())
+ return errInvalidServiceAccountName
}

*serviceAccount = PostableServiceAccount(temp)