Compare commits

39 Commits

Author SHA1 Message Date
Swapnil Nakade 9d53ee5053 feat: adding more generic struct 2026-02-13 18:49:31 +05:30
Piyush Singariya c62b4d9141 test: testing generics 2026-02-13 16:06:57 +05:30
Swapnil Nakade 5720fcb654 feat: adding azure cloud integration apis 2026-02-12 23:49:43 +05:30
Swapnil Nakade d32911b0fd Merge branch 'main' into refactor/aws-cloudintegration 2026-02-12 22:59:03 +05:30
Swapnil Nakade 22fcb7e9fb feat: updating cloud integration aws api 2026-02-12 22:53:26 +05:30
Swapnil Nakade e8d009d225 feat: adding logger in opts 2026-02-12 16:41:23 +05:30
Swapnil Nakade 25b143d21a Merge branch 'main' into refactor/aws-cloudintegration 2026-02-12 16:35:41 +05:30
Swapnil Nakade 4487050375 refactor: improving aws cloud integration apis structure 2026-02-12 16:34:16 +05:30
Swapnil Nakade f3732611ca refactor: aws cloud integration provider impl 2026-02-12 15:08:55 +05:30
Swapnil Nakade 989ca522f8 refactor: updating azure telemetry collections strat 2026-02-12 14:54:24 +05:30
Swapnil Nakade 9a2e9d76b5 feat: updating service definitions 2026-02-10 20:11:40 +05:30
Swapnil Nakade 2be42deecd Merge branch 'main' into feat/azure-integration 2026-02-10 18:44:40 +05:30
Swapnil Nakade 95cad880cc feat: adding default values for azure service config 2026-02-10 18:21:26 +05:30
Swapnil Nakade cfef1091b3 Merge branch 'main' into feat/azure-integration 2026-02-09 15:38:17 +05:30
Swapnil Nakade 4504c364f2 feat: adding comments 2026-02-09 15:37:10 +05:30
Swapnil Nakade 1a006870e1 refactor: removing wrong deps 2026-02-09 15:24:02 +05:30
Swapnil Nakade e7a27a1cfb Merge branch 'main' into feat/azure-integration 2026-02-09 15:19:36 +05:30
Swapnil Nakade 1e7323ead2 refactor: updating response for conn status 2026-02-08 22:56:04 +05:30
Swapnil Nakade af4c6c5b52 Merge branch 'main' into feat/azure-integration 2026-02-08 18:24:47 +05:30
Swapnil Nakade 02262ba245 refactor: wip 2026-02-08 17:28:04 +05:30
Swapnil Nakade df7c9e1339 chore: wip 2026-02-08 00:09:29 +05:30
Swapnil Nakade ac5e52479f refactor: removing comment 2026-02-06 17:48:49 +05:30
Swapnil Nakade de56477bbb Merge branch 'main' into feat/azure-integration 2026-02-06 17:45:40 +05:30
Swapnil Nakade fddd8a27fa fix: aws connection url generation 2026-02-06 17:44:06 +05:30
Swapnil Nakade 2aa4f8e237 feat: adding logs 2026-02-06 11:57:23 +05:30
Swapnil Nakade 74006a214b ci: fixing lint ci issues 2026-02-06 04:01:39 +05:30
Swapnil Nakade ed2cbacadc Merge branch 'main' into feat/azure-integration 2026-02-06 03:55:33 +05:30
Swapnil Nakade 3cbd529843 refactor: reverting 2026-02-06 03:51:29 +05:30
Swapnil Nakade 78b481e895 refactor: cloud integration API cleanup 2026-02-06 03:42:25 +05:30
Swapnil Nakade 215098ec0d refactor: sorting services list for consistency 2026-02-03 13:59:26 +05:30
Swapnil Nakade 5a4ef2e4ce refactor: code beautification 2026-02-03 01:16:58 +05:30
Swapnil Nakade b1f33c4f7f refactor: updating service details api 2026-02-03 00:52:27 +05:30
Swapnil Nakade 713c84b1e4 refactor: updating cloud provider type 2026-02-03 00:10:18 +05:30
Swapnil Nakade c3daf9e428 Merge branch 'main' into feat/azure-integration 2026-02-02 23:37:45 +05:30
Swapnil Nakade 70a908deb1 refactor: updating cloud-integration controller 2026-02-02 23:36:46 +05:30
Swapnil Nakade cc9cdded3c refactor: updating cloud integration apis 2026-02-02 21:50:15 +05:30
Swapnil Nakade 77067cd614 feat: extending cloud integration apis 2026-01-29 12:20:19 +05:30
Swapnil Nakade ab703d9a65 Merge branch 'main' into feat/azure-integration 2026-01-28 18:28:16 +05:30
Swapnil Nakade 611e8fbf9e refactor: updating azure cloud integrations api 2026-01-28 11:18:58 +05:30
130 changed files with 10398 additions and 10471 deletions

View File

@@ -1,6 +1,7 @@
package api
import (
"log/slog"
"net/http"
"net/http/httputil"
"time"
@@ -13,7 +14,6 @@ import (
"github.com/SigNoz/signoz/pkg/http/middleware"
querierAPI "github.com/SigNoz/signoz/pkg/querier"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
@@ -30,13 +30,13 @@ type APIHandlerOptions struct {
RulesManager *rules.Manager
UsageManager *usage.Manager
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
Gateway *httputil.ReverseProxy
GatewayUrl string
// Querier Influx Interval
FluxInterval time.Duration
GlobalConfig global.Config
Logger *slog.Logger // this is present in Signoz.Instrumentation but adding for quick access
}
type APIHandler struct {
@@ -50,7 +50,6 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
Reader: opts.DataConnector,
RuleManager: opts.RulesManager,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
FluxInterval: opts.FluxInterval,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
@@ -58,6 +57,7 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
Logger: opts.Logger,
})
if err != nil {
@@ -118,14 +118,12 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
}
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
router.HandleFunc(
"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
).Methods(http.MethodGet)
}
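
Note: this file's change threads a *slog.Logger through APIHandlerOptions so handlers log via an injected logger rather than a global one. A minimal sketch of that injection pattern, with hypothetical names (HandlerOptions, NewHandler):

package main

import (
	"log/slog"
	"os"
)

// HandlerOptions carries dependencies into the handler, including the logger.
type HandlerOptions struct {
	Logger *slog.Logger
}

type Handler struct {
	opts HandlerOptions
}

func NewHandler(opts HandlerOptions) *Handler {
	return &Handler{opts: opts}
}

func main() {
	// Construct one structured logger and inject it via the options struct.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	h := NewHandler(HandlerOptions{Logger: logger})
	h.opts.Logger.Info("handler constructed", slog.String("component", "api"))
}

Keeping the logger on the options struct duplicates what Signoz.Instrumentation already holds, as the comment in the diff notes, trading a little redundancy for quick access at call sites.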
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"strings"
"time"
@@ -13,20 +14,14 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/modules/user"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
"go.uber.org/zap"
)
type CloudIntegrationConnectionParamsResponse struct {
IngestionUrl string `json:"ingestion_url,omitempty"`
IngestionKey string `json:"ingestion_key,omitempty"`
SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}
// TODO: move this file with other cloud integration related code
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
@@ -41,23 +36,21 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
return
}
cloudProvider := mux.Vars(r)["cloudProvider"]
if cloudProvider != "aws" {
RespondError(w, basemodel.BadRequest(fmt.Errorf(
"cloud provider not supported: %s", cloudProvider,
)), nil)
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't provision PAT for cloud integration:",
), nil)
apiKey, err := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
if err != nil {
render.Error(w, err)
return
}
result := CloudIntegrationConnectionParamsResponse{
result := integrationstypes.GettableCloudIntegrationConnectionParams{
SigNozAPIKey: apiKey,
}
@@ -71,16 +64,17 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
// Return the API Key (PAT) even if the rest of the params can not be deduced.
// Params not returned from here will be requested from the user via form inputs.
// This enables gracefully degraded but working experience even for non-cloud deployments.
zap.L().Info("ingestion params and signoz api url can not be deduced since no license was found")
ah.Respond(w, result)
ah.opts.Logger.InfoContext(
r.Context(),
"ingestion params and signoz api url can not be deduced since no license was found",
)
render.Success(w, http.StatusOK, result)
return
}
signozApiUrl, apiErr := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't deduce ingestion url and signoz api url",
), nil)
signozApiUrl, err := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
if err != nil {
render.Error(w, err)
return
}
@@ -89,48 +83,41 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
gatewayUrl := ah.opts.GatewayUrl
if len(gatewayUrl) > 0 {
ingestionKey, apiErr := getOrCreateCloudProviderIngestionKey(
ingestionKeyString, err := ah.getOrCreateCloudProviderIngestionKey(
r.Context(), gatewayUrl, license.Key, cloudProvider,
)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't get or create ingestion key",
), nil)
if err != nil {
render.Error(w, err)
return
}
result.IngestionKey = ingestionKey
result.IngestionKey = ingestionKeyString
} else {
zap.L().Info("ingestion key can't be deduced since no gateway url has been configured")
ah.opts.Logger.InfoContext(
r.Context(),
"ingestion key can't be deduced since no gateway url has been configured",
)
}
ah.Respond(w, result)
render.Success(w, http.StatusOK, result)
}
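
Note: above, the hard-coded `cloudProvider != "aws"` check becomes integrationstypes.NewCloudProvider, which returns a typed valuer.String. A rough, self-contained sketch of that parse-don't-validate shape; the CloudProvider type and supportedProviders set here are hypothetical stand-ins, not SigNoz's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// CloudProvider is a small value type wrapping a validated string,
// in the spirit of valuer.String. Illustrative only.
type CloudProvider struct{ value string }

func (c CloudProvider) String() string { return c.value }

var supportedProviders = map[string]struct{}{"aws": {}, "azure": {}}

func NewCloudProvider(s string) (CloudProvider, error) {
	normalized := strings.ToLower(strings.TrimSpace(s))
	if _, ok := supportedProviders[normalized]; !ok {
		return CloudProvider{}, fmt.Errorf("cloud provider not supported: %q", s)
	}
	return CloudProvider{value: normalized}, nil
}

func main() {
	provider, err := NewCloudProvider("azure")
	if err != nil {
		panic(err)
	}
	// Downstream code receives a validated type, not a bare string.
	fmt.Println(provider.String())
}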
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider string) (
string, *basemodel.ApiError,
) {
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider valuer.String) (string, error) {
integrationPATName := fmt.Sprintf("%s integration", cloudProvider)
integrationUser, apiErr := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
if apiErr != nil {
return "", apiErr
integrationUser, err := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
if err != nil {
return "", err
}
orgIdUUID, err := valuer.NewUUID(orgId)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't parse orgId: %w", err,
))
return "", err
}
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err,
))
return "", err
}
for _, p := range allPats {
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
@@ -138,9 +125,10 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
}
}
zap.L().Info(
ah.opts.Logger.InfoContext(
ctx,
"no PAT found for cloud integration, creating a new one",
zap.String("cloudProvider", cloudProvider),
slog.String("cloudProvider", cloudProvider.String()),
)
newPAT, err := types.NewStorableAPIKey(
@@ -150,68 +138,48 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
0,
)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
))
return "", err
}
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
))
return "", err
}
return newPAT.Token, nil
}
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
// TODO: move this function out of handler and use proper module structure
func (ah *APIHandler) getOrCreateCloudIntegrationUser(ctx context.Context, orgId string, cloudProvider valuer.String) (*types.User, error) {
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider.String())
email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
return nil, err
}
password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
return nil, err
}
return cloudIntegrationUser, nil
}
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
string, *basemodel.ApiError,
) {
// TODO: remove this struct from here
type deploymentResponse struct {
Name string `json:"name"`
ClusterInfo struct {
Region struct {
DNS string `json:"dns"`
} `json:"region"`
} `json:"cluster"`
}
// TODO: move this function out of handler and use proper module structure
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (string, error) {
respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't query for deployment info: error: %w", err,
))
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't query for deployment info: error")
}
resp := new(deploymentResponse)
resp := new(integrationstypes.GettableDeployment)
err = json.Unmarshal(respBytes, resp)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't unmarshal deployment info response: error: %w", err,
))
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal deployment info response")
}
regionDns := resp.ClusterInfo.Region.DNS
@@ -219,9 +187,11 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
if len(regionDns) < 1 || len(deploymentName) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
return "", errors.WrapInternalf(
err,
errors.CodeInternal,
"deployment info response not in expected shape. couldn't determine region dns and deployment name",
))
)
}
signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)
@@ -229,102 +199,85 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
return signozApiUrl, nil
}
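
Note: the basemodel.InternalError/WrapApiError calls above are replaced with the shared errors package (errors.WrapInternalf, errors.NewInternalf). Their exact signatures aren't fully shown in this diff, so here is a stdlib-only analogue of the wrap-and-classify idea (multiple %w verbs require Go 1.20+):

package main

import (
	"errors"
	"fmt"
)

// errInternal is a sentinel classifying the error; the original cause
// stays in the chain so callers can match either one.
var errInternal = errors.New("internal")

func wrapInternalf(err error, format string, args ...any) error {
	return fmt.Errorf("%w: %s: %w", errInternal, fmt.Sprintf(format, args...), err)
}

func main() {
	cause := errors.New("connection refused")
	err := wrapInternalf(cause, "couldn't query for deployment info")
	fmt.Println(errors.Is(err, errInternal), errors.Is(err, cause)) // true true
}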
type ingestionKey struct {
Name string `json:"name"`
Value string `json:"value"`
// other attributes from gateway response not included here since they are not being used.
}
type ingestionKeysSearchResponse struct {
Status string `json:"status"`
Data []ingestionKey `json:"data"`
Error string `json:"error"`
}
type createIngestionKeyResponse struct {
Status string `json:"status"`
Data ingestionKey `json:"data"`
Error string `json:"error"`
}
func getOrCreateCloudProviderIngestionKey(
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider string,
) (string, *basemodel.ApiError) {
func (ah *APIHandler) getOrCreateCloudProviderIngestionKey(
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider valuer.String,
) (string, error) {
cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)
// see if the key already exists
searchResult, apiErr := requestGateway[ingestionKeysSearchResponse](
searchResult, err := requestGateway[integrationstypes.GettableIngestionKeysSearch](
ctx,
gatewayUrl,
licenseKey,
fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
nil,
ah.opts.Logger,
)
if apiErr != nil {
return "", basemodel.WrapApiError(
apiErr, "couldn't search for cloudprovider ingestion key",
)
if err != nil {
return "", err
}
if searchResult.Status != "success" {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't search for cloudprovider ingestion key: status: %s, error: %s",
return "", errors.NewInternalf(
errors.CodeInternal,
"couldn't search for cloud provider ingestion key: status: %s, error: %s",
searchResult.Status, searchResult.Error,
))
}
for _, k := range searchResult.Data {
if k.Name == cloudProviderKeyName {
if len(k.Value) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
"ingestion keys search response not as expected",
))
}
return k.Value, nil
}
}
zap.L().Info(
"no existing ingestion key found for cloud integration, creating a new one",
zap.String("cloudProvider", cloudProvider),
)
createKeyResult, apiErr := requestGateway[createIngestionKeyResponse](
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
map[string]any{
"name": cloudProviderKeyName,
"tags": []string{"integration", cloudProvider},
},
)
if apiErr != nil {
return "", basemodel.WrapApiError(
apiErr, "couldn't create cloudprovider ingestion key",
)
}
for _, k := range searchResult.Data {
if k.Name != cloudProviderKeyName {
continue
}
if len(k.Value) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", errors.NewInternalf(errors.CodeInternal, "ingestion keys search response not as expected")
}
return k.Value, nil
}
ah.opts.Logger.InfoContext(
ctx,
"no existing ingestion key found for cloud integration, creating a new one",
slog.String("cloudProvider", cloudProvider.String()),
)
createKeyResult, err := requestGateway[integrationstypes.GettableCreateIngestionKey](
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
map[string]any{
"name": cloudProviderKeyName,
"tags": []string{"integration", cloudProvider.String()},
},
ah.opts.Logger,
)
if err != nil {
return "", err
}
if createKeyResult.Status != "success" {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloudprovider ingestion key: status: %s, error: %s",
return "", errors.NewInternalf(
errors.CodeInternal,
"couldn't create cloud provider ingestion key: status: %s, error: %s",
createKeyResult.Status, createKeyResult.Error,
))
)
}
ingestionKey := createKeyResult.Data.Value
if len(ingestionKey) < 1 {
ingestionKeyString := createKeyResult.Data.Value
if len(ingestionKeyString) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
return "", errors.NewInternalf(errors.CodeInternal,
"ingestion key creation response not as expected",
))
)
}
return ingestionKey, nil
return ingestionKeyString, nil
}
func requestGateway[ResponseType any](
ctx context.Context, gatewayUrl string, licenseKey string, path string, payload any,
) (*ResponseType, *basemodel.ApiError) {
ctx context.Context, gatewayUrl, licenseKey, path string, payload any, logger *slog.Logger,
) (*ResponseType, error) {
baseUrl := strings.TrimSuffix(gatewayUrl, "/")
reqUrl := fmt.Sprintf("%s%s", baseUrl, path)
@@ -335,13 +288,12 @@ func requestGateway[ResponseType any](
"X-Consumer-Groups": "ns:default",
}
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload)
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload, logger)
}
func requestAndParseResponse[ResponseType any](
ctx context.Context, url string, headers map[string]string, payload any,
) (*ResponseType, *basemodel.ApiError) {
ctx context.Context, url string, headers map[string]string, payload any, logger *slog.Logger,
) (*ResponseType, error) {
reqMethod := http.MethodGet
var reqBody io.Reader
if payload != nil {
@@ -349,18 +301,14 @@ func requestAndParseResponse[ResponseType any](
bodyJson, err := json.Marshal(payload)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't serialize request payload to JSON: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal payload")
}
reqBody = bytes.NewBuffer([]byte(bodyJson))
reqBody = bytes.NewBuffer(bodyJson)
}
req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't prepare request: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't create req")
}
for k, v := range headers {
@@ -373,23 +321,26 @@ func requestAndParseResponse[ResponseType any](
response, err := client.Do(req)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't make request: %w", err))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't make req")
}
defer response.Body.Close()
defer func() {
err = response.Body.Close()
if err != nil {
logger.ErrorContext(ctx, "couldn't close response body", "error", err)
}
}()
respBody, err := io.ReadAll(response.Body)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't read response: %w", err))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read response body")
}
var resp ResponseType
err = json.Unmarshal(respBody, &resp)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't unmarshal gateway response into %T", resp,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal response body")
}
return &resp, nil
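
Note: requestGateway and requestAndParseResponse stay generic over the response type, which is what the "test: testing generics" commit exercises. A self-contained sketch of the same generic JSON-request shape; the getJSON helper, keyResponse type, and httptest server are illustrative, not part of the PR:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// getJSON performs a GET and unmarshals the body into the type parameter T.
func getJSON[T any](ctx context.Context, url string) (*T, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("couldn't create req: %w", err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("couldn't make req: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("couldn't read response body: %w", err)
	}
	var out T
	if err := json.Unmarshal(body, &out); err != nil {
		return nil, fmt.Errorf("couldn't unmarshal response body: %w", err)
	}
	return &out, nil
}

type keyResponse struct {
	Status string `json:"status"`
}

func main() {
	// Stand-in for the gateway; returns a canned JSON payload.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"status":"success"}`)
	}))
	defer srv.Close()

	res, err := getJSON[keyResponse](context.Background(), srv.URL)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Status) // success
}

Instantiating the helper per response type (getJSON[keyResponse]) keeps the HTTP and JSON plumbing in one place while each caller gets back a typed struct.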

View File

@@ -38,7 +38,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
@@ -127,13 +126,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
)
}
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
)
}
// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
signoz.SQLStore,
@@ -167,12 +159,12 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
RulesManager: rm,
UsageManager: usageManager,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
FluxInterval: config.Querier.FluxInterval,
Gateway: gatewayProxy,
GatewayUrl: config.Gateway.URL.String(),
GlobalConfig: config.Global,
Logger: signoz.Instrumentation.Logger(),
}
apiHandler, err := api.NewAPIHandler(apiOpts, signoz)

View File

@@ -12,8 +12,6 @@ export interface MockUPlotInstance {
export interface MockUPlotPaths {
spline: jest.Mock;
bars: jest.Mock;
linear: jest.Mock;
stepped: jest.Mock;
}
// Create mock instance methods
@@ -25,23 +23,10 @@ const createMockUPlotInstance = (): MockUPlotInstance => ({
setSeries: jest.fn(),
});
// Path builder: (self, seriesIdx, idx0, idx1) => paths or null
const createMockPathBuilder = (name: string): jest.Mock =>
jest.fn(() => ({
name, // To test if the correct pathBuilder is used
stroke: jest.fn(),
fill: jest.fn(),
clip: jest.fn(),
}));
// Create mock paths - linear, spline, stepped needed by UPlotSeriesBuilder.getPathBuilder
const mockPaths = {
spline: jest.fn(() => createMockPathBuilder('spline')),
bars: jest.fn(() => createMockPathBuilder('bars')),
linear: jest.fn(() => createMockPathBuilder('linear')),
stepped: jest.fn((opts?: { align?: number }) =>
createMockPathBuilder(`stepped-(${opts?.align ?? 0})`),
),
// Create mock paths
const mockPaths: MockUPlotPaths = {
spline: jest.fn(),
bars: jest.fn(),
};
// Mock static methods

View File

@@ -83,7 +83,7 @@ export const prepareUPlotConfig = ({
drawStyle: DrawStyle.Line,
label: label,
colorMapping: widget.customLegendColors ?? {},
spanGaps: true,
spanGaps: false,
lineStyle: LineStyle.Solid,
lineInterpolation: LineInterpolation.Spline,
showPoints: VisibilityMode.Never,

View File

@@ -14,11 +14,6 @@ export interface GraphVisibilityState {
dataIndex: SeriesVisibilityItem[];
}
export interface SeriesVisibilityState {
labels: string[];
visibility: boolean[];
}
/**
* Context in which a panel is rendered. Used to vary behavior (e.g. persistence,
* interactions) per context.

View File

@@ -1,271 +0,0 @@
import { LOCALSTORAGE } from 'constants/localStorage';
import type { GraphVisibilityState } from '../../types';
import {
getStoredSeriesVisibility,
updateSeriesVisibilityToLocalStorage,
} from '../legendVisibilityUtils';
describe('legendVisibilityUtils', () => {
const storageKey = LOCALSTORAGE.GRAPH_VISIBILITY_STATES;
beforeEach(() => {
localStorage.clear();
jest.spyOn(window.localStorage.__proto__, 'getItem');
jest.spyOn(window.localStorage.__proto__, 'setItem');
});
afterEach(() => {
jest.restoreAllMocks();
});
describe('getStoredSeriesVisibility', () => {
it('returns null when there is no stored visibility state', () => {
const result = getStoredSeriesVisibility('widget-1');
expect(result).toBeNull();
expect(localStorage.getItem).toHaveBeenCalledWith(storageKey);
});
it('returns null when widget has no stored dataIndex', () => {
const stored: GraphVisibilityState[] = [
{
name: 'widget-1',
dataIndex: [],
},
];
localStorage.setItem(storageKey, JSON.stringify(stored));
const result = getStoredSeriesVisibility('widget-1');
expect(result).toBeNull();
});
it('returns visibility array by index when widget state exists', () => {
const stored: GraphVisibilityState[] = [
{
name: 'widget-1',
dataIndex: [
{ label: 'CPU', show: true },
{ label: 'Memory', show: false },
],
},
{
name: 'widget-2',
dataIndex: [{ label: 'Errors', show: true }],
},
];
localStorage.setItem(storageKey, JSON.stringify(stored));
const result = getStoredSeriesVisibility('widget-1');
expect(result).not.toBeNull();
expect(result).toEqual({
labels: ['CPU', 'Memory'],
visibility: [true, false],
});
});
it('returns visibility by index including duplicate labels', () => {
const stored: GraphVisibilityState[] = [
{
name: 'widget-1',
dataIndex: [
{ label: 'CPU', show: true },
{ label: 'CPU', show: false },
{ label: 'Memory', show: false },
],
},
];
localStorage.setItem(storageKey, JSON.stringify(stored));
const result = getStoredSeriesVisibility('widget-1');
expect(result).not.toBeNull();
expect(result).toEqual({
labels: ['CPU', 'CPU', 'Memory'],
visibility: [true, false, false],
});
});
it('returns null on malformed JSON in localStorage', () => {
localStorage.setItem(storageKey, '{invalid-json');
const result = getStoredSeriesVisibility('widget-1');
expect(result).toBeNull();
});
it('returns null when widget id is not found', () => {
const stored: GraphVisibilityState[] = [
{
name: 'another-widget',
dataIndex: [{ label: 'CPU', show: true }],
},
];
localStorage.setItem(storageKey, JSON.stringify(stored));
const result = getStoredSeriesVisibility('widget-1');
expect(result).toBeNull();
});
});
describe('updateSeriesVisibilityToLocalStorage', () => {
it('creates new visibility state when none exists', () => {
const seriesVisibility = [
{ label: 'CPU', show: true },
{ label: 'Memory', show: false },
];
updateSeriesVisibilityToLocalStorage('widget-1', seriesVisibility);
const stored = getStoredSeriesVisibility('widget-1');
expect(stored).not.toBeNull();
expect(stored).toEqual({
labels: ['CPU', 'Memory'],
visibility: [true, false],
});
});
it('adds a new widget entry when other widgets already exist', () => {
const existing: GraphVisibilityState[] = [
{
name: 'widget-existing',
dataIndex: [{ label: 'Errors', show: true }],
},
];
localStorage.setItem(storageKey, JSON.stringify(existing));
const newVisibility = [{ label: 'CPU', show: false }];
updateSeriesVisibilityToLocalStorage('widget-new', newVisibility);
const stored = getStoredSeriesVisibility('widget-new');
expect(stored).not.toBeNull();
expect(stored).toEqual({ labels: ['CPU'], visibility: [false] });
});
it('updates existing widget visibility when entry already exists', () => {
const initialVisibility: GraphVisibilityState[] = [
{
name: 'widget-1',
dataIndex: [
{ label: 'CPU', show: true },
{ label: 'Memory', show: true },
],
},
];
localStorage.setItem(storageKey, JSON.stringify(initialVisibility));
const updatedVisibility = [
{ label: 'CPU', show: false },
{ label: 'Memory', show: true },
];
updateSeriesVisibilityToLocalStorage('widget-1', updatedVisibility);
const stored = getStoredSeriesVisibility('widget-1');
expect(stored).not.toBeNull();
expect(stored).toEqual({
labels: ['CPU', 'Memory'],
visibility: [false, true],
});
});
it('silently handles malformed existing JSON without throwing', () => {
localStorage.setItem(storageKey, '{invalid-json');
expect(() =>
updateSeriesVisibilityToLocalStorage('widget-1', [
{ label: 'CPU', show: true },
]),
).not.toThrow();
});
it('when existing JSON is malformed, overwrites with valid data for the widget', () => {
localStorage.setItem(storageKey, '{invalid-json');
updateSeriesVisibilityToLocalStorage('widget-1', [
{ label: 'x-axis', show: true },
{ label: 'CPU', show: false },
]);
const stored = getStoredSeriesVisibility('widget-1');
expect(stored).not.toBeNull();
expect(stored).toEqual({
labels: ['x-axis', 'CPU'],
visibility: [true, false],
});
const expected = [
{
name: 'widget-1',
dataIndex: [
{ label: 'x-axis', show: true },
{ label: 'CPU', show: false },
],
},
];
expect(localStorage.setItem).toHaveBeenCalledWith(
storageKey,
JSON.stringify(expected),
);
});
it('preserves other widgets when updating one widget', () => {
const existing: GraphVisibilityState[] = [
{ name: 'widget-a', dataIndex: [{ label: 'A', show: true }] },
{ name: 'widget-b', dataIndex: [{ label: 'B', show: false }] },
];
localStorage.setItem(storageKey, JSON.stringify(existing));
updateSeriesVisibilityToLocalStorage('widget-b', [
{ label: 'B', show: true },
]);
expect(getStoredSeriesVisibility('widget-a')).toEqual({
labels: ['A'],
visibility: [true],
});
expect(getStoredSeriesVisibility('widget-b')).toEqual({
labels: ['B'],
visibility: [true],
});
});
it('calls setItem with storage key and stringified visibility states', () => {
updateSeriesVisibilityToLocalStorage('widget-1', [
{ label: 'CPU', show: true },
]);
expect(localStorage.setItem).toHaveBeenCalledTimes(1);
expect(localStorage.setItem).toHaveBeenCalledWith(
storageKey,
expect.any(String),
);
const [_, value] = (localStorage.setItem as jest.Mock).mock.calls[0];
expect((): void => JSON.parse(value)).not.toThrow();
expect(JSON.parse(value)).toEqual([
{ name: 'widget-1', dataIndex: [{ label: 'CPU', show: true }] },
]);
});
it('stores empty dataIndex when seriesVisibility is empty', () => {
updateSeriesVisibilityToLocalStorage('widget-1', []);
const raw = localStorage.getItem(storageKey);
expect(raw).not.toBeNull();
const parsed = JSON.parse(raw ?? '[]');
expect(parsed).toEqual([{ name: 'widget-1', dataIndex: [] }]);
expect(getStoredSeriesVisibility('widget-1')).toBeNull();
});
});
});

View File

@@ -88,7 +88,7 @@ export function buildBaseConfig({
max: undefined,
softMin: widget.softMin ?? undefined,
softMax: widget.softMax ?? undefined,
thresholds: thresholdOptions,
// thresholds,
logBase: widget.isLogScale ? 10 : undefined,
distribution: widget.isLogScale
? DistributionType.Logarithmic

View File

@@ -1,20 +1,15 @@
import { LOCALSTORAGE } from 'constants/localStorage';
import {
GraphVisibilityState,
SeriesVisibilityItem,
SeriesVisibilityState,
} from '../types';
import { GraphVisibilityState, SeriesVisibilityItem } from '../types';
/**
* Retrieves the stored series visibility for a specific widget from localStorage by index.
* Index 0 is the x-axis (time); indices 1, 2, ... are data series (same order as uPlot plot.series).
* Retrieves the visibility map for a specific widget from localStorage
* @param widgetId - The unique identifier of the widget
* @returns visibility[i] = show state for series at index i, or null if not found
* @returns A Map of series labels to their visibility state, or null if not found
*/
export function getStoredSeriesVisibility(
widgetId: string,
): SeriesVisibilityState | null {
): Map<string, boolean> | null {
try {
const storedData = localStorage.getItem(LOCALSTORAGE.GRAPH_VISIBILITY_STATES);
@@ -29,15 +24,8 @@ export function getStoredSeriesVisibility(
return null;
}
return {
labels: widgetState.dataIndex.map((item) => item.label),
visibility: widgetState.dataIndex.map((item) => item.show),
};
} catch (error) {
if (error instanceof SyntaxError) {
// If the stored data is malformed, remove it
localStorage.removeItem(LOCALSTORAGE.GRAPH_VISIBILITY_STATES);
}
return new Map(widgetState.dataIndex.map((item) => [item.label, item.show]));
} catch {
// Silently handle parsing errors - fall back to default visibility
return null;
}
@@ -47,31 +35,40 @@ export function updateSeriesVisibilityToLocalStorage(
widgetId: string,
seriesVisibility: SeriesVisibilityItem[],
): void {
let visibilityStates: GraphVisibilityState[] = [];
try {
const storedData = localStorage.getItem(LOCALSTORAGE.GRAPH_VISIBILITY_STATES);
visibilityStates = JSON.parse(storedData || '[]');
} catch (error) {
if (error instanceof SyntaxError) {
visibilityStates = [];
let visibilityStates: GraphVisibilityState[];
if (!storedData) {
visibilityStates = [
{
name: widgetId,
dataIndex: seriesVisibility,
},
];
} else {
visibilityStates = JSON.parse(storedData);
}
}
const widgetState = visibilityStates.find((state) => state.name === widgetId);
const widgetState = visibilityStates.find((state) => state.name === widgetId);
if (widgetState) {
widgetState.dataIndex = seriesVisibility;
} else {
visibilityStates = [
...visibilityStates,
{
name: widgetId,
dataIndex: seriesVisibility,
},
];
}
if (!widgetState) {
visibilityStates = [
...visibilityStates,
{
name: widgetId,
dataIndex: seriesVisibility,
},
];
} else {
widgetState.dataIndex = seriesVisibility;
}
localStorage.setItem(
LOCALSTORAGE.GRAPH_VISIBILITY_STATES,
JSON.stringify(visibilityStates),
);
localStorage.setItem(
LOCALSTORAGE.GRAPH_VISIBILITY_STATES,
JSON.stringify(visibilityStates),
);
} catch {
// Silently handle parsing errors - fall back to default visibility
}
}

View File

@@ -1,6 +1,5 @@
import { PANEL_TYPES } from 'constants/queryBuilder';
import TimeSeriesPanel from '../DashboardContainer/visualization/panels/TimeSeriesPanel/TimeSeriesPanel';
import HistogramPanelWrapper from './HistogramPanelWrapper';
import ListPanelWrapper from './ListPanelWrapper';
import PiePanelWrapper from './PiePanelWrapper';
@@ -9,7 +8,7 @@ import UplotPanelWrapper from './UplotPanelWrapper';
import ValuePanelWrapper from './ValuePanelWrapper';
export const PanelTypeVsPanelWrapper = {
[PANEL_TYPES.TIME_SERIES]: TimeSeriesPanel,
[PANEL_TYPES.TIME_SERIES]: UplotPanelWrapper,
[PANEL_TYPES.TABLE]: TablePanelWrapper,
[PANEL_TYPES.LIST]: ListPanelWrapper,
[PANEL_TYPES.VALUE]: ValuePanelWrapper,

View File

@@ -5,8 +5,8 @@ import cx from 'classnames';
import { LegendItem } from 'lib/uPlotV2/config/types';
import useLegendsSync from 'lib/uPlotV2/hooks/useLegendsSync';
import { useLegendActions } from '../../hooks/useLegendActions';
import { LegendPosition, LegendProps } from '../types';
import { useLegendActions } from './useLegendActions';
import './Legend.styles.scss';
@@ -106,7 +106,6 @@ export default function Legend({
placeholder="Search..."
value={legendSearchQuery}
onChange={(e): void => setLegendSearchQuery(e.target.value)}
data-testid="legend-search-input"
className="legend-search-input"
/>
</div>

View File

@@ -1,213 +0,0 @@
import React from 'react';
import { render, RenderResult, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { LegendItem } from 'lib/uPlotV2/config/types';
import useLegendsSync from 'lib/uPlotV2/hooks/useLegendsSync';
import { useLegendActions } from '../../hooks/useLegendActions';
import Legend from '../Legend/Legend';
import { LegendPosition } from '../types';
jest.mock('react-virtuoso', () => ({
VirtuosoGrid: ({
data,
itemContent,
className,
}: {
data: LegendItem[];
itemContent: (index: number, item: LegendItem) => React.ReactNode;
className?: string;
}): JSX.Element => (
<div data-testid="virtuoso-grid" className={className}>
{data.map((item, index) => (
<div key={item.seriesIndex ?? index} data-testid="legend-item-wrapper">
{itemContent(index, item)}
</div>
))}
</div>
),
}));
jest.mock('lib/uPlotV2/hooks/useLegendsSync');
jest.mock('lib/uPlotV2/hooks/useLegendActions');
const mockUseLegendsSync = useLegendsSync as jest.MockedFunction<
typeof useLegendsSync
>;
const mockUseLegendActions = useLegendActions as jest.MockedFunction<
typeof useLegendActions
>;
describe('Legend', () => {
const baseLegendItemsMap = {
0: {
seriesIndex: 0,
label: 'A',
show: true,
color: '#ff0000',
},
1: {
seriesIndex: 1,
label: 'B',
show: false,
color: '#00ff00',
},
2: {
seriesIndex: 2,
label: 'C',
show: true,
color: '#0000ff',
},
};
let onLegendClick: jest.Mock;
let onLegendMouseMove: jest.Mock;
let onLegendMouseLeave: jest.Mock;
let onFocusSeries: jest.Mock;
beforeEach(() => {
onLegendClick = jest.fn();
onLegendMouseMove = jest.fn();
onLegendMouseLeave = jest.fn();
onFocusSeries = jest.fn();
mockUseLegendsSync.mockReturnValue({
legendItemsMap: baseLegendItemsMap,
focusedSeriesIndex: 1,
setFocusedSeriesIndex: jest.fn(),
});
mockUseLegendActions.mockReturnValue({
onLegendClick,
onLegendMouseMove,
onLegendMouseLeave,
onFocusSeries,
});
});
afterEach(() => {
jest.clearAllMocks();
});
const renderLegend = (position?: LegendPosition): RenderResult =>
render(
<Legend
position={position}
// config is not used directly in the component, it's consumed by the mocked hook
config={{} as any}
/>,
);
describe('layout and position', () => {
it('renders search input when legend position is RIGHT', () => {
renderLegend(LegendPosition.RIGHT);
expect(screen.getByTestId('legend-search-input')).toBeInTheDocument();
});
it('does not render search input when legend position is BOTTOM (default)', () => {
renderLegend();
expect(screen.queryByTestId('legend-search-input')).not.toBeInTheDocument();
});
it('renders the marker with the correct border color', () => {
renderLegend(LegendPosition.RIGHT);
const legendMarker = document.querySelector(
'[data-legend-item-id="0"] [data-is-legend-marker="true"]',
) as HTMLElement;
expect(legendMarker).toHaveStyle({
'border-color': '#ff0000',
});
});
it('renders all legend items in the grid by default', () => {
renderLegend(LegendPosition.RIGHT);
expect(screen.getByTestId('virtuoso-grid')).toBeInTheDocument();
expect(screen.getByText('A')).toBeInTheDocument();
expect(screen.getByText('B')).toBeInTheDocument();
expect(screen.getByText('C')).toBeInTheDocument();
});
});
describe('search behavior (RIGHT position)', () => {
it('filters legend items based on search query (case-insensitive)', async () => {
const user = userEvent.setup();
renderLegend(LegendPosition.RIGHT);
const searchInput = screen.getByTestId('legend-search-input');
await user.type(searchInput, 'A');
expect(screen.getByText('A')).toBeInTheDocument();
expect(screen.queryByText('B')).not.toBeInTheDocument();
expect(screen.queryByText('C')).not.toBeInTheDocument();
});
it('shows empty state when no legend items match the search query', async () => {
const user = userEvent.setup();
renderLegend(LegendPosition.RIGHT);
const searchInput = screen.getByTestId('legend-search-input');
await user.type(searchInput, 'network');
expect(
screen.getByText(/No series found matching "network"/i),
).toBeInTheDocument();
expect(screen.queryByTestId('virtuoso-grid')).not.toBeInTheDocument();
});
it('does not filter or show empty state when search query is empty or only whitespace', async () => {
const user = userEvent.setup();
renderLegend(LegendPosition.RIGHT);
const searchInput = screen.getByTestId('legend-search-input');
await user.type(searchInput, ' ');
expect(
screen.queryByText(/No series found matching/i),
).not.toBeInTheDocument();
expect(screen.getByText('A')).toBeInTheDocument();
expect(screen.getByText('B')).toBeInTheDocument();
expect(screen.getByText('C')).toBeInTheDocument();
});
});
describe('legend actions', () => {
it('calls onLegendClick when a legend item is clicked', async () => {
const user = userEvent.setup();
renderLegend(LegendPosition.RIGHT);
await user.click(screen.getByText('A'));
expect(onLegendClick).toHaveBeenCalledTimes(1);
});
it('calls mouseMove when the mouse moves over a legend item', async () => {
const user = userEvent.setup();
renderLegend(LegendPosition.RIGHT);
const legendItem = document.querySelector(
'[data-legend-item-id="0"]',
) as HTMLElement;
await user.hover(legendItem);
expect(onLegendMouseMove).toHaveBeenCalledTimes(1);
});
it('calls onLegendMouseLeave when the mouse leaves the legend container', async () => {
const user = userEvent.setup();
renderLegend(LegendPosition.RIGHT);
const container = document.querySelector('.legend-container') as HTMLElement;
await user.hover(container);
await user.unhover(container);
expect(onLegendMouseLeave).toHaveBeenCalledTimes(1);
});
});
});

View File

@@ -4,12 +4,12 @@ import uPlot, { Axis } from 'uplot';
import { uPlotXAxisValuesFormat } from '../../uPlotLib/utils/constants';
import getGridColor from '../../uPlotLib/utils/getGridColor';
import { buildYAxisSizeCalculator } from '../utils/axis';
import { AxisProps, ConfigBuilder } from './types';
const PANEL_TYPES_WITH_X_AXIS_DATETIME_FORMAT = [
PANEL_TYPES.TIME_SERIES,
PANEL_TYPES.BAR,
PANEL_TYPES.PIE,
];
/**
@@ -114,6 +114,81 @@ export class UPlotAxisBuilder extends ConfigBuilder<AxisProps, Axis> {
: undefined;
}
/**
* Calculate axis size from existing size property
*/
private getExistingAxisSize(
self: uPlot,
axis: Axis,
values: string[] | undefined,
axisIdx: number,
cycleNum: number,
): number {
const internalSize = (axis as { _size?: number })._size;
if (internalSize !== undefined) {
return internalSize;
}
const existingSize = axis.size;
if (typeof existingSize === 'function') {
return existingSize(self, values ?? [], axisIdx, cycleNum);
}
return existingSize ?? 0;
}
/**
* Calculate text width for longest value
*/
private calculateTextWidth(
self: uPlot,
axis: Axis,
values: string[] | undefined,
): number {
if (!values || values.length === 0) {
return 0;
}
// Find longest value
const longestVal = values.reduce(
(acc, val) => (val.length > acc.length ? val : acc),
'',
);
if (longestVal === '' || !axis.font?.[0]) {
return 0;
}
// eslint-disable-next-line prefer-destructuring, no-param-reassign
self.ctx.font = axis.font[0];
return self.ctx.measureText(longestVal).width / devicePixelRatio;
}
/**
* Build Y-axis dynamic size calculator
*/
private buildYAxisSizeCalculator(): uPlot.Axis.Size {
return (
self: uPlot,
values: string[] | undefined,
axisIdx: number,
cycleNum: number,
): number => {
const axis = self.axes[axisIdx];
// Bail out, force convergence
if (cycleNum > 1) {
return this.getExistingAxisSize(self, axis, values, axisIdx, cycleNum);
}
const gap = this.props.gap ?? 5;
let axisSize = (axis.ticks?.size ?? 0) + gap;
axisSize += this.calculateTextWidth(self, axis, values);
return Math.ceil(axisSize);
};
}
/**
* Build dynamic size calculator for Y-axis
*/
@@ -127,7 +202,7 @@ export class UPlotAxisBuilder extends ConfigBuilder<AxisProps, Axis> {
// Y-axis needs dynamic sizing based on text width
if (scaleKey === 'y') {
return buildYAxisSizeCalculator(this.props.gap ?? 5);
return this.buildYAxisSizeCalculator();
}
return undefined;

View File

@@ -1,4 +1,3 @@
import { SeriesVisibilityState } from 'container/DashboardContainer/visualization/panels/types';
import { getStoredSeriesVisibility } from 'container/DashboardContainer/visualization/panels/utils/legendVisibilityUtils';
import { ThresholdsDrawHookOptions } from 'lib/uPlotV2/hooks/types';
import { thresholdsDrawHook } from 'lib/uPlotV2/hooks/useThresholdsDrawHook';
@@ -236,9 +235,9 @@ export class UPlotConfigBuilder extends ConfigBuilder<
}
/**
* Returns stored series visibility by index from localStorage when preferences source is LOCAL_STORAGE, otherwise null.
* Returns stored series visibility map from localStorage when preferences source is LOCAL_STORAGE, otherwise null.
*/
private getStoredVisibility(): SeriesVisibilityState | null {
private getStoredVisibilityMap(): Map<string, boolean> | null {
if (
this.widgetId &&
this.selectionPreferencesSource === SelectionPreferencesSource.LOCAL_STORAGE
@@ -252,23 +251,22 @@ export class UPlotConfigBuilder extends ConfigBuilder<
* Get legend items with visibility state restored from localStorage if available
*/
getLegendItems(): Record<number, LegendItem> {
const seriesVisibilityState = this.getStoredVisibility();
const isAnySeriesHidden = !!seriesVisibilityState?.visibility?.some(
(show) => !show,
const visibilityMap = this.getStoredVisibilityMap();
const isAnySeriesHidden = !!(
visibilityMap && Array.from(visibilityMap.values()).some((show) => !show)
);
return this.series.reduce((acc, s: UPlotSeriesBuilder, index: number) => {
const seriesConfig = s.getConfig();
const label = seriesConfig.label ?? '';
// +1 because uPlot series 0 is x-axis/time; data series are at 1, 2, ... (also matches stored visibility[0]=time, visibility[1]=first data, ...)
const seriesIndex = index + 1;
const show = resolveSeriesVisibility({
seriesIndex,
seriesShow: seriesConfig.show,
seriesLabel: label,
seriesVisibilityState,
const seriesIndex = index + 1; // +1 because the first series is the timestamp
const show = resolveSeriesVisibility(
label,
seriesConfig.show,
visibilityMap,
isAnySeriesHidden,
});
);
acc[seriesIndex] = {
seriesIndex,
@@ -296,23 +294,22 @@ export class UPlotConfigBuilder extends ConfigBuilder<
...DEFAULT_PLOT_CONFIG,
};
const seriesVisibilityState = this.getStoredVisibility();
const isAnySeriesHidden = !!seriesVisibilityState?.visibility?.some(
(show) => !show,
const visibilityMap = this.getStoredVisibilityMap();
const isAnySeriesHidden = !!(
visibilityMap && Array.from(visibilityMap.values()).some((show) => !show)
);
config.series = [
{ value: (): string => '' }, // Base series for timestamp
...this.series.map((s, index) => {
...this.series.map((s) => {
const series = s.getConfig();
// Stored visibility[0] is x-axis/time; data series start at visibility[1]
const visible = resolveSeriesVisibility({
seriesIndex: index + 1,
seriesShow: series.show,
seriesLabel: series.label ?? '',
seriesVisibilityState,
const label = series.label ?? '';
const visible = resolveSeriesVisibility(
label,
series.show,
visibilityMap,
isAnySeriesHidden,
});
);
return {
...series,
show: visible,

View File

@@ -15,33 +15,7 @@ import {
* Builder for uPlot series configuration
* Handles creation of series settings
*/
/**
* Path builders are static and shared across all instances of UPlotSeriesBuilder
*/
let builders: PathBuilders | null = null;
export class UPlotSeriesBuilder extends ConfigBuilder<SeriesProps, Series> {
constructor(props: SeriesProps) {
super(props);
const pathBuilders = uPlot.paths;
if (!builders) {
const linearBuilder = pathBuilders.linear;
const splineBuilder = pathBuilders.spline;
const steppedBuilder = pathBuilders.stepped;
if (!linearBuilder || !splineBuilder || !steppedBuilder) {
throw new Error('Required uPlot path builders are not available');
}
builders = {
linear: linearBuilder(),
spline: splineBuilder(),
stepBefore: steppedBuilder({ align: -1 }),
stepAfter: steppedBuilder({ align: 1 }),
};
}
}
private buildLineConfig({
lineColor,
lineWidth,
@@ -224,6 +198,8 @@ interface PathBuilders {
[key: string]: Series.PathBuilder;
}
let builders: PathBuilders | null = null;
/**
* Get path builder based on draw style and interpolation
*/
@@ -231,8 +207,23 @@ function getPathBuilder(
style: DrawStyle,
lineInterpolation?: LineInterpolation,
): Series.PathBuilder {
const pathBuilders = uPlot.paths;
if (!builders) {
throw new Error('Required uPlot path builders are not available');
const linearBuilder = pathBuilders.linear;
const splineBuilder = pathBuilders.spline;
const steppedBuilder = pathBuilders.stepped;
if (!linearBuilder || !splineBuilder || !steppedBuilder) {
throw new Error('Required uPlot path builders are not available');
}
builders = {
linear: linearBuilder(),
spline: splineBuilder(),
stepBefore: steppedBuilder({ align: -1 }),
stepAfter: steppedBuilder({ align: 1 }),
};
}
if (style === DrawStyle.Line) {

View File

@@ -1,393 +0,0 @@
import { getToolTipValue } from 'components/Graph/yAxisConfig';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { uPlotXAxisValuesFormat } from 'lib/uPlotLib/utils/constants';
import type uPlot from 'uplot';
import type { AxisProps } from '../types';
import { UPlotAxisBuilder } from '../UPlotAxisBuilder';
jest.mock('components/Graph/yAxisConfig', () => ({
getToolTipValue: jest.fn(),
}));
const createAxisProps = (overrides: Partial<AxisProps> = {}): AxisProps => ({
scaleKey: 'x',
label: 'Time',
isDarkMode: false,
show: true,
...overrides,
});
describe('UPlotAxisBuilder', () => {
beforeEach(() => {
jest.clearAllMocks();
});
it('builds basic axis config with defaults', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
label: 'Time',
}),
);
const config = builder.getConfig();
expect(config.scale).toBe('x');
expect(config.label).toBe('Time');
expect(config.show).toBe(true);
expect(config.side).toBe(2);
expect(config.gap).toBe(5);
// Default grid and ticks are created
expect(config.grid).toEqual({
stroke: 'rgba(0,0,0,0.5)',
width: 0.2,
show: true,
});
expect(config.ticks).toEqual({
width: 0.3,
show: true,
});
});
it('sets config values when provided', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
label: 'Time',
show: false,
side: 0,
gap: 10,
grid: {
stroke: '#ff0000',
width: 1,
show: false,
},
ticks: {
stroke: '#00ff00',
width: 1,
show: false,
size: 10,
},
values: ['1', '2', '3'],
space: 20,
size: 100,
stroke: '#0000ff',
}),
);
const config = builder.getConfig();
expect(config.scale).toBe('x');
expect(config.label).toBe('Time');
expect(config.show).toBe(false);
expect(config.gap).toBe(10);
expect(config.grid).toEqual({
stroke: '#ff0000',
width: 1,
show: false,
});
expect(config.ticks).toEqual({
stroke: '#00ff00',
width: 1,
show: false,
size: 10,
});
expect(config.values).toEqual(['1', '2', '3']);
expect(config.space).toBe(20);
expect(config.size).toBe(100);
expect(config.stroke).toBe('#0000ff');
});
it('merges custom grid config over defaults and respects isDarkMode and isLogScale', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({
isDarkMode: true,
isLogScale: true,
grid: {
width: 1,
},
}),
);
const config = builder.getConfig();
expect(config.grid).toEqual({
// stroke falls back to theme-based default when not provided
stroke: 'rgba(231,233,237,0.3)',
// provided width overrides default
width: 1,
// show falls back to default when not provided
show: true,
});
});
it('uses provided ticks config when present and falls back to defaults otherwise', () => {
const customTicks = { width: 1, show: false };
const withTicks = new UPlotAxisBuilder(
createAxisProps({
ticks: customTicks,
}),
);
const withoutTicks = new UPlotAxisBuilder(createAxisProps());
expect(withTicks.getConfig().ticks).toBe(customTicks);
expect(withoutTicks.getConfig().ticks).toEqual({
width: 0.3,
show: true,
});
});
it('uses time-based X-axis values formatter for time-series like panels', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
panelType: PANEL_TYPES.TIME_SERIES,
}),
);
const config = builder.getConfig();
expect(config.values).toBe(uPlotXAxisValuesFormat);
});
it('does not attach X-axis datetime formatter when panel type is not supported', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
panelType: PANEL_TYPES.LIST, // not in PANEL_TYPES_WITH_X_AXIS_DATETIME_FORMAT
}),
);
const config = builder.getConfig();
expect(config.values).toBeUndefined();
});
it('builds Y-axis values formatter that delegates to getToolTipValue', () => {
const yBuilder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'y',
yAxisUnit: 'ms',
decimalPrecision: 3,
}),
);
const config = yBuilder.getConfig();
expect(typeof config.values).toBe('function');
(getToolTipValue as jest.Mock).mockImplementation(
(value: string, unit?: string, precision?: unknown) =>
`formatted:${value}:${unit}:${precision}`,
);
// Simulate uPlot calling the values formatter
const valuesFn = (config.values as unknown) as (
self: uPlot,
vals: unknown[],
) => string[];
const result = valuesFn({} as uPlot, [1, null, 2, Number.NaN]);
expect(getToolTipValue).toHaveBeenCalledTimes(2);
expect(getToolTipValue).toHaveBeenNthCalledWith(1, '1', 'ms', 3);
expect(getToolTipValue).toHaveBeenNthCalledWith(2, '2', 'ms', 3);
// Null/NaN values should map to empty strings
expect(result).toEqual(['formatted:1:ms:3', '', 'formatted:2:ms:3', '']);
});
it('adds dynamic size calculator only for Y-axis when size is not provided', () => {
const yBuilder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'y',
}),
);
const xBuilder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
}),
);
const yConfig = yBuilder.getConfig();
const xConfig = xBuilder.getConfig();
expect(typeof yConfig.size).toBe('function');
expect(xConfig.size).toBeUndefined();
});
it('uses explicit size function when provided', () => {
const sizeFn: uPlot.Axis.Size = jest.fn(() => 100) as uPlot.Axis.Size;
const builder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'y',
size: sizeFn,
}),
);
const config = builder.getConfig();
expect(config.size).toBe(sizeFn);
});
it('builds stroke color based on stroke and isDarkMode', () => {
const explicitStroke = new UPlotAxisBuilder(
createAxisProps({
stroke: '#ff0000',
}),
);
const darkStroke = new UPlotAxisBuilder(
createAxisProps({
stroke: undefined,
isDarkMode: true,
}),
);
const lightStroke = new UPlotAxisBuilder(
createAxisProps({
stroke: undefined,
isDarkMode: false,
}),
);
expect(explicitStroke.getConfig().stroke).toBe('#ff0000');
expect(darkStroke.getConfig().stroke).toBe('white');
expect(lightStroke.getConfig().stroke).toBe('black');
});
it('uses explicit values formatter when provided', () => {
const customValues: uPlot.Axis.Values = jest.fn(() => ['a', 'b', 'c']);
const builder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'y',
values: customValues,
}),
);
const config = builder.getConfig();
expect(config.values).toBe(customValues);
});
it('returns undefined values for scaleKey neither x nor y', () => {
const builder = new UPlotAxisBuilder(createAxisProps({ scaleKey: 'custom' }));
const config = builder.getConfig();
expect(config.values).toBeUndefined();
});
it('includes space in config when provided', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({ scaleKey: 'y', space: 50 }),
);
const config = builder.getConfig();
expect(config.space).toBe(50);
});
it('includes PANEL_TYPES.BAR and PANEL_TYPES.TIME_SERIES in X-axis datetime formatter', () => {
const barBuilder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
panelType: PANEL_TYPES.BAR,
}),
);
expect(barBuilder.getConfig().values).toBe(uPlotXAxisValuesFormat);
const timeSeriesBuilder = new UPlotAxisBuilder(
createAxisProps({
scaleKey: 'x',
panelType: PANEL_TYPES.TIME_SERIES,
}),
);
expect(timeSeriesBuilder.getConfig().values).toBe(uPlotXAxisValuesFormat);
});
it('should return the existing size when cycleNum > 1', () => {
const builder = new UPlotAxisBuilder(createAxisProps({ scaleKey: 'y' }));
const config = builder.getConfig();
const sizeFn = config.size;
expect(typeof sizeFn).toBe('function');
const mockAxis = {
_size: 80,
ticks: { size: 10 },
font: ['12px sans-serif'],
};
const mockSelf = ({
axes: [mockAxis],
ctx: { measureText: jest.fn(() => ({ width: 60 })), font: '' },
} as unknown) as uPlot;
const result = (sizeFn as (
s: uPlot,
v: string[],
a: number,
c: number,
) => number)(
mockSelf,
['100', '200'],
0,
2, // cycleNum > 1
);
expect(result).toBe(80);
});
it('should invoke the size calculator and compute from text width when cycleNum <= 1', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({ scaleKey: 'y', gap: 8 }),
);
const config = builder.getConfig();
const sizeFn = config.size;
expect(typeof sizeFn).toBe('function');
const mockAxis = {
ticks: { size: 12 },
font: ['12px sans-serif'],
};
const measureText = jest.fn(() => ({ width: 48 }));
const mockSelf = ({
axes: [mockAxis],
ctx: {
measureText,
get font() {
return '';
},
set font(_v: string) {
/* noop */
},
},
} as unknown) as uPlot;
const result = (sizeFn as (
s: uPlot,
v: string[],
a: number,
c: number,
) => number)(
mockSelf,
['10', '2000ms'],
0,
0, // cycleNum <= 1
);
expect(measureText).toHaveBeenCalledWith('2000ms');
expect(result).toBeGreaterThanOrEqual(12 + 8);
});
it('merge updates axis props', () => {
const builder = new UPlotAxisBuilder(
createAxisProps({ scaleKey: 'y', label: 'Original' }),
);
builder.merge({ label: 'Merged', yAxisUnit: 'bytes' });
const config = builder.getConfig();
expect(config.label).toBe('Merged');
expect(config.values).toBeDefined();
});
});
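The stroke assertions above encode a simple precedence: an explicit stroke always wins, and the theme decides otherwise. A minimal sketch of that order as a hypothetical helper (not the builder's actual code):

function resolveAxisStroke(
  stroke: string | undefined,
  isDarkMode: boolean | undefined,
): string {
  // Explicit stroke takes priority; otherwise fall back to the theme default.
  if (stroke) {
    return stroke;
  }
  return isDarkMode ? 'white' : 'black';
}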

View File

@@ -1,337 +0,0 @@
import { PANEL_TYPES } from 'constants/queryBuilder';
import uPlot from 'uplot';
import type { SeriesProps } from '../types';
import { DrawStyle, SelectionPreferencesSource } from '../types';
import { UPlotConfigBuilder } from '../UPlotConfigBuilder';
// Mock only the real boundary that hits localStorage
jest.mock(
'container/DashboardContainer/visualization/panels/utils/legendVisibilityUtils',
() => ({
getStoredSeriesVisibility: jest.fn(),
}),
);
const getStoredSeriesVisibilityMock = jest.requireMock(
'container/DashboardContainer/visualization/panels/utils/legendVisibilityUtils',
) as {
getStoredSeriesVisibility: jest.Mock;
};
describe('UPlotConfigBuilder', () => {
beforeEach(() => {
jest.clearAllMocks();
});
const createSeriesProps = (
overrides: Partial<SeriesProps> = {},
): SeriesProps => ({
scaleKey: 'y',
label: 'Requests',
colorMapping: {},
drawStyle: DrawStyle.Line,
panelType: PANEL_TYPES.TIME_SERIES,
...overrides,
});
it('returns correct save selection preference flag from constructor args', () => {
const builder = new UPlotConfigBuilder({
shouldSaveSelectionPreference: true,
});
expect(builder.getShouldSaveSelectionPreference()).toBe(true);
});
it('returns widgetId from constructor args', () => {
const builder = new UPlotConfigBuilder({ widgetId: 'widget-123' });
expect(builder.getWidgetId()).toBe('widget-123');
});
it('sets tzDate from constructor and includes it in config', () => {
const tzDate = (ts: number): Date => new Date(ts);
const builder = new UPlotConfigBuilder({ tzDate });
const config = builder.getConfig();
expect(config.tzDate).toBe(tzDate);
});
it('does not call onDragSelect for click without drag (width === 0)', () => {
const onDragSelect = jest.fn();
const builder = new UPlotConfigBuilder({ onDragSelect });
const config = builder.getConfig();
const setSelectHooks = config.hooks?.setSelect ?? [];
expect(setSelectHooks.length).toBe(1);
const uplotInstance = ({
select: { left: 10, width: 0 },
posToVal: jest.fn(),
} as unknown) as uPlot;
// Simulate uPlot calling the hook
const setSelectHook = setSelectHooks[0];
setSelectHook!(uplotInstance);
expect(onDragSelect).not.toHaveBeenCalled();
});
it('calls onDragSelect with start and end times in milliseconds for a drag selection', () => {
const onDragSelect = jest.fn();
const builder = new UPlotConfigBuilder({ onDragSelect });
const config = builder.getConfig();
const setSelectHooks = config.hooks?.setSelect ?? [];
expect(setSelectHooks.length).toBe(1);
const posToVal = jest
.fn()
// left position
.mockReturnValueOnce(100)
// left + width
.mockReturnValueOnce(110);
const uplotInstance = ({
select: { left: 50, width: 20 },
posToVal,
} as unknown) as uPlot;
const setSelectHook = setSelectHooks[0];
setSelectHook!(uplotInstance);
expect(onDragSelect).toHaveBeenCalledTimes(1);
// 100 and 110 seconds converted to milliseconds
expect(onDragSelect).toHaveBeenCalledWith(100_000, 110_000);
});
it('adds and removes hooks via addHook, and exposes them through getConfig', () => {
const builder = new UPlotConfigBuilder();
const drawHook = jest.fn();
const remove = builder.addHook('draw', drawHook as uPlot.Hooks.Defs['draw']);
let config = builder.getConfig();
expect(config.hooks?.draw).toContain(drawHook);
// Remove and ensure it no longer appears in config
remove();
config = builder.getConfig();
expect(config.hooks?.draw ?? []).not.toContain(drawHook);
});
it('adds axes, scales, and series and wires them into the final config', () => {
const builder = new UPlotConfigBuilder();
// Add axis and scale
builder.addAxis({ scaleKey: 'y', label: 'Requests' });
builder.addScale({ scaleKey: 'y' });
// Add two series; legend indices should start from 1 (0 is the timestamp series)
builder.addSeries(createSeriesProps({ label: 'Requests' }));
builder.addSeries(createSeriesProps({ label: 'Errors' }));
const config = builder.getConfig();
// Axes
expect(config.axes).toHaveLength(1);
expect(config.axes?.[0].scale).toBe('y');
// Scales are returned as an object keyed by scaleKey
expect(config.scales).toBeDefined();
expect(Object.keys(config.scales ?? {})).toContain('y');
// Series: base timestamp + 2 data series
expect(config.series).toHaveLength(3);
// Base series (index 0) has a value formatter that returns an empty string
const baseSeries = config.series?.[0] as { value?: () => string };
expect(typeof baseSeries?.value).toBe('function');
expect(baseSeries?.value?.()).toBe('');
// Legend items align with series and carry label and color from series config
const legendItems = builder.getLegendItems();
expect(Object.keys(legendItems)).toEqual(['1', '2']);
expect(legendItems[1].seriesIndex).toBe(1);
expect(legendItems[1].label).toBe('Requests');
expect(legendItems[2].label).toBe('Errors');
});
it('merges axis when addAxis is called twice with same scaleKey', () => {
const builder = new UPlotConfigBuilder();
builder.addAxis({ scaleKey: 'y', label: 'Requests' });
builder.addAxis({ scaleKey: 'y', label: 'Updated Label', show: false });
const config = builder.getConfig();
expect(config.axes).toHaveLength(1);
expect(config.axes?.[0].label).toBe('Updated Label');
expect(config.axes?.[0].show).toBe(false);
});
it('merges scale when addScale is called twice with same scaleKey', () => {
const builder = new UPlotConfigBuilder();
builder.addScale({ scaleKey: 'y', min: 0 });
builder.addScale({ scaleKey: 'y', max: 100 });
const config = builder.getConfig();
// Only one scale entry for 'y' (merge path used, no duplicate added)
expect(config.scales).toBeDefined();
const scales = config.scales ?? {};
expect(Object.keys(scales)).toEqual(['y']);
expect(scales.y?.range).toBeDefined();
});
it('restores visibility state from localStorage when selectionPreferencesSource is LOCAL_STORAGE', () => {
// Index 0 = x-axis/time; indices 1,2 = data series (Requests, Errors). resolveSeriesVisibility matches by seriesIndex + seriesLabel.
getStoredSeriesVisibilityMock.getStoredSeriesVisibility.mockReturnValue({
labels: ['x-axis', 'Requests', 'Errors'],
visibility: [true, true, false],
});
const builder = new UPlotConfigBuilder({
widgetId: 'widget-1',
selectionPreferencesSource: SelectionPreferencesSource.LOCAL_STORAGE,
});
builder.addSeries(createSeriesProps({ label: 'Requests' }));
builder.addSeries(createSeriesProps({ label: 'Errors' }));
const legendItems = builder.getLegendItems();
// When any series is hidden, legend visibility is driven by the stored map
expect(legendItems[1].show).toBe(true);
expect(legendItems[2].show).toBe(false);
const config = builder.getConfig();
const [, firstSeries, secondSeries] = config.series ?? [];
expect(firstSeries?.show).toBe(true);
expect(secondSeries?.show).toBe(false);
});
it('does not attempt to read stored visibility when using in-memory preferences', () => {
const builder = new UPlotConfigBuilder({
widgetId: 'widget-1',
selectionPreferencesSource: SelectionPreferencesSource.IN_MEMORY,
});
builder.addSeries(createSeriesProps({ label: 'Requests' }));
builder.getLegendItems();
builder.getConfig();
expect(
getStoredSeriesVisibilityMock.getStoredSeriesVisibility,
).not.toHaveBeenCalled();
});
it('adds thresholds only once per scale key', () => {
const builder = new UPlotConfigBuilder();
const thresholdsOptions = {
scaleKey: 'y',
thresholds: [{ thresholdValue: 100 }],
};
builder.addThresholds(thresholdsOptions);
builder.addThresholds(thresholdsOptions);
const config = builder.getConfig();
const drawHooks = config.hooks?.draw ?? [];
// Only a single draw hook should be registered for the same scaleKey
expect(drawHooks.length).toBe(1);
});
it('adds multiple thresholds when scale key is different', () => {
const builder = new UPlotConfigBuilder();
const thresholdsOptions = {
scaleKey: 'y',
thresholds: [{ thresholdValue: 100 }],
};
builder.addThresholds(thresholdsOptions);
const thresholdsOptions2 = {
scaleKey: 'y2',
thresholds: [{ thresholdValue: 200 }],
};
builder.addThresholds(thresholdsOptions2);
const config = builder.getConfig();
const drawHooks = config.hooks?.draw ?? [];
// Two draw hooks should be registered for different scaleKeys
expect(drawHooks.length).toBe(2);
});
it('merges cursor configuration with defaults instead of replacing them', () => {
const builder = new UPlotConfigBuilder();
builder.setCursor({
drag: { setScale: false },
});
const config = builder.getConfig();
expect(config.cursor?.drag?.setScale).toBe(false);
// Points configuration from DEFAULT_CURSOR_CONFIG should still be present
expect(config.cursor?.points).toBeDefined();
});
it('adds plugins and includes them in config', () => {
const builder = new UPlotConfigBuilder();
const plugin: uPlot.Plugin = {
opts: (): void => {},
hooks: {},
};
builder.addPlugin(plugin);
const config = builder.getConfig();
expect(config.plugins).toContain(plugin);
});
it('sets padding, legend, focus, select, tzDate, bands and includes them in config', () => {
const tzDate = (ts: number): Date => new Date(ts);
const builder = new UPlotConfigBuilder();
const bands: uPlot.Band[] = [{ series: [1, 2], fill: (): string => '#000' }];
builder.setBands(bands);
builder.setPadding([10, 20, 30, 40]);
builder.setLegend({ show: true, live: true });
builder.setFocus({ alpha: 0.5 });
builder.setSelect({ left: 0, width: 0, top: 0, height: 0 });
builder.setTzDate(tzDate);
const config = builder.getConfig();
expect(config.bands).toEqual(bands);
expect(config.padding).toEqual([10, 20, 30, 40]);
expect(config.legend).toEqual({ show: true, live: true });
expect(config.focus).toEqual({ alpha: 0.5 });
expect(config.select).toEqual({ left: 0, width: 0, top: 0, height: 0 });
expect(config.tzDate).toBe(tzDate);
});
it('does not include plugins when none added', () => {
const builder = new UPlotConfigBuilder();
const config = builder.getConfig();
expect(config.plugins).toBeUndefined();
});
it('does not include bands when empty', () => {
const builder = new UPlotConfigBuilder();
const config = builder.getConfig();
expect(config.bands).toBeUndefined();
});
});
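The two drag-selection tests above pin down the setSelect contract: plain clicks (width === 0) are ignored, and a real drag converts the selected x-range from seconds to milliseconds before invoking the callback. A hedged sketch of that hook, assuming uPlot's standard select and posToVal APIs (the helper name is illustrative, not the builder's actual code):

import type uPlot from 'uplot';

function makeSetSelectHook(
  onDragSelect: (startMs: number, endMs: number) => void,
): (self: uPlot) => void {
  return (self): void => {
    const { left, width } = self.select;
    if (width <= 0) {
      // Click without drag: nothing to select
      return;
    }
    // posToVal maps pixel positions on the x scale to values in seconds
    const startSec = self.posToVal(left, 'x');
    const endSec = self.posToVal(left + width, 'x');
    onDragSelect(startSec * 1000, endSec * 1000);
  };
}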

View File

@@ -1,236 +0,0 @@
import type uPlot from 'uplot';
import * as scaleUtils from '../../utils/scale';
import type { ScaleProps } from '../types';
import { DistributionType } from '../types';
import { UPlotScaleBuilder } from '../UPlotScaleBuilder';
const createScaleProps = (overrides: Partial<ScaleProps> = {}): ScaleProps => ({
scaleKey: 'y',
time: false,
auto: undefined,
min: undefined,
max: undefined,
softMin: undefined,
softMax: undefined,
distribution: DistributionType.Linear,
...overrides,
});
describe('UPlotScaleBuilder', () => {
const getFallbackMinMaxSpy = jest.spyOn(
scaleUtils,
'getFallbackMinMaxTimeStamp',
);
beforeEach(() => {
jest.clearAllMocks();
});
it('initializes softMin/softMax correctly when both are 0 (treated as unset)', () => {
const builder = new UPlotScaleBuilder(
createScaleProps({
softMin: 0,
softMax: 0,
}),
);
// Non-time scale, so the config path uses the thresholds pipeline; we just
// care that adjustSoftLimitsWithThresholds receives null soft limits instead of 0/0.
const adjustSpy = jest.spyOn(scaleUtils, 'adjustSoftLimitsWithThresholds');
builder.getConfig();
expect(adjustSpy).toHaveBeenCalledWith(null, null, undefined, undefined);
});
it('handles time scales using explicit min/max and rounds max down to the previous minute', () => {
const min = 1_700_000_000; // seconds
const max = 1_700_000_600; // seconds
const builder = new UPlotScaleBuilder(
createScaleProps({
scaleKey: 'x',
time: true,
min,
max,
}),
);
const config = builder.getConfig();
const xScale = config.x;
expect(xScale.time).toBe(true);
expect(xScale.auto).toBe(false);
expect(Array.isArray(xScale.range)).toBe(true);
const [resolvedMin, resolvedMax] = xScale.range as [number, number];
// min is passed through
expect(resolvedMin).toBe(min);
// max is coerced to "endTime - 1 minute" and rounded down to minute precision
const oneMinuteAgoTimestamp = (max - 60) * 1000;
const currentDate = new Date(oneMinuteAgoTimestamp);
currentDate.setSeconds(0);
currentDate.setMilliseconds(0);
const expectedMax = Math.floor(currentDate.getTime() / 1000);
expect(resolvedMax).toBe(expectedMax);
});
it('falls back to getFallbackMinMaxTimeStamp when time scale has no min/max', () => {
getFallbackMinMaxSpy.mockReturnValue({
fallbackMin: 100,
fallbackMax: 200,
});
const builder = new UPlotScaleBuilder(
createScaleProps({
scaleKey: 'x',
time: true,
min: undefined,
max: undefined,
}),
);
const config = builder.getConfig();
const [resolvedMin, resolvedMax] = config.x.range as [number, number];
expect(getFallbackMinMaxSpy).toHaveBeenCalled();
expect(resolvedMin).toBe(100);
// max is aligned to "fallbackMax - 60 seconds" minute boundary
expect(resolvedMax).toBeLessThanOrEqual(200);
expect(resolvedMax).toBeGreaterThan(100);
});
it('pipes limits through soft-limit adjustment and log-scale normalization before range config', () => {
const adjustSpy = jest.spyOn(scaleUtils, 'adjustSoftLimitsWithThresholds');
const normalizeSpy = jest.spyOn(scaleUtils, 'normalizeLogScaleLimits');
const getRangeConfigSpy = jest.spyOn(scaleUtils, 'getRangeConfig');
const thresholds = {
scaleKey: 'y',
thresholds: [{ thresholdValue: 10 }],
yAxisUnit: 'ms',
};
const builder = new UPlotScaleBuilder(
createScaleProps({
softMin: 1,
softMax: 5,
min: 0,
max: 100,
distribution: DistributionType.Logarithmic,
thresholds,
logBase: 2,
padMinBy: 0.1,
padMaxBy: 0.2,
}),
);
builder.getConfig();
expect(adjustSpy).toHaveBeenCalledWith(1, 5, thresholds.thresholds, 'ms');
expect(normalizeSpy).toHaveBeenCalledWith({
distr: DistributionType.Logarithmic,
logBase: 2,
limits: {
min: 0,
max: 100,
softMin: expect.anything(),
softMax: expect.anything(),
},
});
expect(getRangeConfigSpy).toHaveBeenCalled();
});
it('computes distribution config for non-time scales and wires range function when range is not provided', () => {
const createRangeFnSpy = jest.spyOn(scaleUtils, 'createRangeFunction');
const builder = new UPlotScaleBuilder(
createScaleProps({
scaleKey: 'y',
time: false,
distribution: DistributionType.Linear,
}),
);
const config = builder.getConfig();
const yScale = config.y;
expect(createRangeFnSpy).toHaveBeenCalled();
// range should be a function when not provided explicitly
expect(typeof yScale.range).toBe('function');
// distribution config should be applied
expect(yScale.distr).toBeDefined();
expect(yScale.log).toBeDefined();
});
it('respects explicit range function when provided on props', () => {
const explicitRange: uPlot.Scale.Range = jest.fn(() => [
0,
10,
]) as uPlot.Scale.Range;
const builder = new UPlotScaleBuilder(
createScaleProps({
scaleKey: 'y',
range: explicitRange,
}),
);
const config = builder.getConfig();
const yScale = config.y;
expect(yScale.range).toBe(explicitRange);
});
it('derives auto flag when not explicitly provided, based on hasFixedRange and time', () => {
const getRangeConfigSpy = jest.spyOn(scaleUtils, 'getRangeConfig');
const builder = new UPlotScaleBuilder(
createScaleProps({
min: 0,
max: 100,
time: false,
}),
);
const config = builder.getConfig();
const yScale = config.y;
expect(getRangeConfigSpy).toHaveBeenCalled();
// For a non-time scale with a fixed min/max, hasFixedRange is true → auto should remain false
expect(yScale.auto).toBe(false);
});
it('merge updates internal min/max/soft limits while preserving other props', () => {
const builder = new UPlotScaleBuilder(
createScaleProps({
scaleKey: 'y',
min: 0,
max: 10,
softMin: 1,
softMax: 9,
time: false,
}),
);
builder.merge({
min: 2,
softMax: undefined,
});
expect(builder.props.min).toBe(2);
expect(builder.props.softMax).toBe(undefined);
expect(builder.props.max).toBe(10);
expect(builder.props.softMin).toBe(1);
expect(builder.props.time).toBe(false);
expect(builder.props.scaleKey).toBe('y');
expect(builder.props.distribution).toBe(DistributionType.Linear);
expect(builder.props.thresholds).toBe(undefined);
});
});
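The explicit min/max test above spells out the minute alignment applied to a time scale's max. The same arithmetic as a standalone sketch (hypothetical helper name):

function alignMaxToPreviousMinute(maxSec: number): number {
  // Step back one minute from the requested end time, truncate to the
  // minute boundary, and convert back to seconds.
  const date = new Date((maxSec - 60) * 1000);
  date.setSeconds(0, 0);
  return Math.floor(date.getTime() / 1000);
}

// e.g. alignMaxToPreviousMinute(1_700_000_600) reproduces the expectedMax
// computed in the explicit min/max test above.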

View File

@@ -1,295 +0,0 @@
import { PANEL_TYPES } from 'constants/queryBuilder';
import { themeColors } from 'constants/theme';
import uPlot from 'uplot';
import type { SeriesProps } from '../types';
import {
DrawStyle,
LineInterpolation,
LineStyle,
VisibilityMode,
} from '../types';
import { UPlotSeriesBuilder } from '../UPlotSeriesBuilder';
const createBaseProps = (
overrides: Partial<SeriesProps> = {},
): SeriesProps => ({
scaleKey: 'y',
label: 'Requests',
colorMapping: {},
drawStyle: DrawStyle.Line,
isDarkMode: false,
panelType: PANEL_TYPES.TIME_SERIES,
...overrides,
});
interface MockPath extends uPlot.Series.Paths {
name?: string;
}
describe('UPlotSeriesBuilder', () => {
it('maps basic props into uPlot series config', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
label: 'Latency',
spanGaps: true,
show: false,
}),
);
const config = builder.getConfig();
expect(config.scale).toBe('y');
expect(config.label).toBe('Latency');
expect(config.spanGaps).toBe(true);
expect(config.show).toBe(false);
expect(config.pxAlign).toBe(true);
expect(typeof config.value).toBe('function');
});
it('uses explicit lineColor when provided, regardless of mapping', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
lineColor: '#ff00ff',
colorMapping: { Requests: '#00ff00' },
}),
);
const config = builder.getConfig();
expect(config.stroke).toBe('#ff00ff');
});
it('falls back to theme colors when no label is provided', () => {
const darkBuilder = new UPlotSeriesBuilder(
createBaseProps({
label: undefined,
isDarkMode: true,
lineColor: undefined,
}),
);
const lightBuilder = new UPlotSeriesBuilder(
createBaseProps({
label: undefined,
isDarkMode: false,
lineColor: undefined,
}),
);
const darkConfig = darkBuilder.getConfig();
const lightConfig = lightBuilder.getConfig();
expect(darkConfig.stroke).toBe(themeColors.white);
expect(lightConfig.stroke).toBe(themeColors.black);
});
it('uses colorMapping when available and no explicit lineColor is provided', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
label: 'Requests',
colorMapping: { Requests: '#123456' },
lineColor: undefined,
}),
);
const config = builder.getConfig();
expect(config.stroke).toBe('#123456');
});
it('passes through a custom pathBuilder when provided', () => {
const customPaths = (jest.fn() as unknown) as uPlot.Series.PathBuilder;
const builder = new UPlotSeriesBuilder(
createBaseProps({
pathBuilder: customPaths,
}),
);
const config = builder.getConfig();
expect(config.paths).toBe(customPaths);
});
it('does not build line paths when drawStyle is Points, but still renders points', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Points,
pointSize: 4,
lineWidth: 2,
lineColor: '#aa00aa',
}),
);
const config = builder.getConfig();
expect(typeof config.paths).toBe('function');
expect(config.paths && config.paths({} as uPlot, 1, 0, 10)).toBeNull();
expect(config.points).toBeDefined();
expect(config.points?.stroke).toBe('#aa00aa');
expect(config.points?.fill).toBe('#aa00aa');
expect(config.points?.show).toBe(true);
expect(config.points?.size).toBe(4);
});
it('derives point size based on lineWidth and pointSize', () => {
const smallPointsBuilder = new UPlotSeriesBuilder(
createBaseProps({
lineWidth: 4,
pointSize: 2,
}),
);
const largePointsBuilder = new UPlotSeriesBuilder(
createBaseProps({
lineWidth: 2,
pointSize: 4,
}),
);
const smallConfig = smallPointsBuilder.getConfig();
const largeConfig = largePointsBuilder.getConfig();
expect(smallConfig.points?.size).toBeUndefined();
expect(largeConfig.points?.size).toBe(4);
});
it('uses pointsBuilder when provided instead of default visibility logic', () => {
const pointsBuilder: uPlot.Series.Points.Show = jest.fn(
() => true,
) as uPlot.Series.Points.Show;
const builder = new UPlotSeriesBuilder(
createBaseProps({
pointsBuilder,
drawStyle: DrawStyle.Line,
}),
);
const config = builder.getConfig();
expect(config.points?.show).toBe(pointsBuilder);
});
it('respects VisibilityMode for point visibility when no custom pointsBuilder is given', () => {
const neverPointsBuilder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Line,
showPoints: VisibilityMode.Never,
}),
);
const alwaysPointsBuilder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Line,
showPoints: VisibilityMode.Always,
}),
);
const neverConfig = neverPointsBuilder.getConfig();
const alwaysConfig = alwaysPointsBuilder.getConfig();
expect(neverConfig.points?.show).toBe(false);
expect(alwaysConfig.points?.show).toBe(true);
});
it('applies LineStyle.Dashed and lineCap to line config', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
lineStyle: LineStyle.Dashed,
lineCap: 'round' as CanvasLineCap,
}),
);
const config = builder.getConfig();
expect(config.dash).toEqual([10, 10]);
expect(config.cap).toBe('round');
});
it('builds default paths for Line drawStyle and invokes the path builder', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Line,
lineInterpolation: LineInterpolation.Linear,
}),
);
const config = builder.getConfig();
const result = config.paths?.({} as uPlot, 1, 0, 10);
expect((result as MockPath).name).toBe('linear');
});
it('uses StepBefore and StepAfter interpolation for line paths', () => {
const stepBeforeBuilder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Line,
lineInterpolation: LineInterpolation.StepBefore,
}),
);
const stepAfterBuilder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Line,
lineInterpolation: LineInterpolation.StepAfter,
}),
);
const stepBeforeConfig = stepBeforeBuilder.getConfig();
const stepAfterConfig = stepAfterBuilder.getConfig();
const stepBeforePath = stepBeforeConfig.paths?.({} as uPlot, 1, 0, 5);
const stepAfterPath = stepAfterConfig.paths?.({} as uPlot, 1, 0, 5);
expect((stepBeforePath as MockPath).name).toBe('stepped-(-1)');
expect((stepAfterPath as MockPath).name).toBe('stepped-(1)');
});
it('defaults to spline interpolation when lineInterpolation is Spline or undefined', () => {
const splineBuilder = new UPlotSeriesBuilder(
createBaseProps({
drawStyle: DrawStyle.Line,
lineInterpolation: LineInterpolation.Spline,
}),
);
const defaultBuilder = new UPlotSeriesBuilder(
createBaseProps({ drawStyle: DrawStyle.Line }),
);
const splineConfig = splineBuilder.getConfig();
const defaultConfig = defaultBuilder.getConfig();
const splinePath = splineConfig.paths?.({} as uPlot, 1, 0, 10);
const defaultPath = defaultConfig.paths?.({} as uPlot, 1, 0, 10);
expect((splinePath as MockPath).name).toBe('spline');
expect((defaultPath as MockPath).name).toBe('spline');
});
it('uses generateColor when label has no colorMapping and no lineColor', () => {
const builder = new UPlotSeriesBuilder(
createBaseProps({
label: 'CustomSeries',
colorMapping: {},
lineColor: undefined,
}),
);
const config = builder.getConfig();
expect(config.stroke).toBe('#E64A3C');
});
it('passes through pointsFilter when provided', () => {
const pointsFilter: uPlot.Series.Points.Filter = jest.fn(
(_self, _seriesIdx, _show) => null,
);
const builder = new UPlotSeriesBuilder(
createBaseProps({
pointsFilter,
drawStyle: DrawStyle.Line,
}),
);
const config = builder.getConfig();
expect(config.points?.filter).toBe(pointsFilter);
});
});
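Taken together, the colour tests above describe a four-step stroke precedence: explicit lineColor, then the label's colorMapping entry, then a deterministic generated colour for labelled series, then the theme fallback. A sketch of that order; generateColor stands in for whatever deterministic generator the builder actually uses (the test expects '#E64A3C' for 'CustomSeries'):

import { themeColors } from 'constants/theme';

// Stand-in for the builder's deterministic label → colour generator.
declare function generateColor(label: string): string;

function resolveSeriesStroke(opts: {
  label?: string;
  lineColor?: string;
  colorMapping: Record<string, string>;
  isDarkMode?: boolean;
}): string {
  const { label, lineColor, colorMapping, isDarkMode } = opts;
  if (lineColor) return lineColor; // explicit colour always wins
  if (label && colorMapping[label]) return colorMapping[label];
  if (label) return generateColor(label); // deterministic per-label colour
  return isDarkMode ? themeColors.white : themeColors.black; // theme fallback
}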

View File

@@ -1,395 +0,0 @@
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { updateSeriesVisibilityToLocalStorage } from 'container/DashboardContainer/visualization/panels/utils/legendVisibilityUtils';
import {
PlotContextProvider,
usePlotContext,
} from 'lib/uPlotV2/context/PlotContext';
import type uPlot from 'uplot';
jest.mock(
'container/DashboardContainer/visualization/panels/utils/legendVisibilityUtils',
() => ({
updateSeriesVisibilityToLocalStorage: jest.fn(),
}),
);
const mockUpdateSeriesVisibilityToLocalStorage = updateSeriesVisibilityToLocalStorage as jest.MockedFunction<
typeof updateSeriesVisibilityToLocalStorage
>;
interface MockSeries extends Partial<uPlot.Series> {
label?: string;
show?: boolean;
}
const createMockPlot = (series: MockSeries[] = []): uPlot =>
(({
series,
batch: jest.fn((fn: () => void) => fn()),
setSeries: jest.fn(),
} as unknown) as uPlot);
interface TestComponentProps {
plot?: uPlot;
widgetId?: string;
shouldSaveSelectionPreference?: boolean;
}
const TestComponent = ({
plot,
widgetId,
shouldSaveSelectionPreference,
}: TestComponentProps): JSX.Element => {
const {
setPlotContextInitialState,
syncSeriesVisibilityToLocalStorage,
onToggleSeriesVisibility,
onToggleSeriesOnOff,
onFocusSeries,
} = usePlotContext();
const handleInit = (): void => {
if (
!plot ||
!widgetId ||
typeof shouldSaveSelectionPreference !== 'boolean'
) {
return;
}
setPlotContextInitialState({
uPlotInstance: plot,
widgetId,
shouldSaveSelectionPreference,
});
};
return (
<div>
<button type="button" data-testid="init" onClick={handleInit}>
Init
</button>
<button
type="button"
data-testid="sync-visibility"
onClick={(): void => syncSeriesVisibilityToLocalStorage()}
>
Sync visibility
</button>
<button
type="button"
data-testid="toggle-visibility"
onClick={(): void => onToggleSeriesVisibility(1)}
>
Toggle visibility
</button>
<button
type="button"
data-testid="toggle-on-off-1"
onClick={(): void => onToggleSeriesOnOff(1)}
>
Toggle on/off 1
</button>
<button
type="button"
data-testid="toggle-on-off-5"
onClick={(): void => onToggleSeriesOnOff(5)}
>
Toggle on/off 5
</button>
<button
type="button"
data-testid="focus-series"
onClick={(): void => onFocusSeries(1)}
>
Focus series
</button>
</div>
);
};
describe('PlotContext', () => {
afterEach(() => {
jest.clearAllMocks();
});
it('throws when usePlotContext is used outside provider', () => {
const Consumer = (): JSX.Element => {
// eslint-disable-next-line react-hooks/rules-of-hooks
usePlotContext();
return <div />;
};
expect(() => render(<Consumer />)).toThrow(
'Should be used inside the context',
);
});
it('syncSeriesVisibilityToLocalStorage does nothing without plot or widgetId', async () => {
const user = userEvent.setup();
render(
<PlotContextProvider>
<TestComponent />
</PlotContextProvider>,
);
await user.click(screen.getByTestId('sync-visibility'));
expect(mockUpdateSeriesVisibilityToLocalStorage).not.toHaveBeenCalled();
});
it('syncSeriesVisibilityToLocalStorage serializes series visibility to localStorage helper', async () => {
const user = userEvent.setup();
const plot = createMockPlot([
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
{ label: 'Memory', show: false },
]);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-123"
shouldSaveSelectionPreference
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('sync-visibility'));
expect(mockUpdateSeriesVisibilityToLocalStorage).toHaveBeenCalledTimes(1);
expect(mockUpdateSeriesVisibilityToLocalStorage).toHaveBeenCalledWith(
'widget-123',
[
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
{ label: 'Memory', show: false },
],
);
});
describe('onToggleSeriesVisibility', () => {
it('does nothing when plot instance is not set', async () => {
const user = userEvent.setup();
render(
<PlotContextProvider>
<TestComponent />
</PlotContextProvider>,
);
await user.click(screen.getByTestId('toggle-visibility'));
// No errors and no calls to localStorage helper
expect(mockUpdateSeriesVisibilityToLocalStorage).not.toHaveBeenCalled();
});
it('highlights a single series and saves visibility when preferences are enabled', async () => {
const user = userEvent.setup();
const series: MockSeries[] = [
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
{ label: 'Memory', show: true },
];
const plot = createMockPlot(series);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-visibility"
shouldSaveSelectionPreference
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('toggle-visibility'));
const setSeries = (plot.setSeries as jest.Mock).mock.calls;
// index 0 is skipped, so we expect calls for 1 and 2
expect(setSeries).toEqual([
[1, { show: true }],
[2, { show: false }],
]);
expect(mockUpdateSeriesVisibilityToLocalStorage).toHaveBeenCalledTimes(1);
expect(mockUpdateSeriesVisibilityToLocalStorage).toHaveBeenCalledWith(
'widget-visibility',
[
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
{ label: 'Memory', show: true },
],
);
});
it('resets visibility for all series when toggling the same index again', async () => {
const user = userEvent.setup();
const series: MockSeries[] = [
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
{ label: 'Memory', show: true },
];
const plot = createMockPlot(series);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-reset"
shouldSaveSelectionPreference
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('toggle-visibility'));
(plot.setSeries as jest.Mock).mockClear();
await user.click(screen.getByTestId('toggle-visibility'));
const setSeries = (plot.setSeries as jest.Mock).mock.calls;
// After reset, all non-zero series should be shown
expect(setSeries).toEqual([
[1, { show: true }],
[2, { show: true }],
]);
});
});
describe('onToggleSeriesOnOff', () => {
it('does nothing when plot instance is not set', async () => {
const user = userEvent.setup();
render(
<PlotContextProvider>
<TestComponent />
</PlotContextProvider>,
);
await user.click(screen.getByTestId('toggle-on-off-1'));
expect(mockUpdateSeriesVisibilityToLocalStorage).not.toHaveBeenCalled();
});
it('toggles series show flag and saves visibility when preferences are enabled', async () => {
const user = userEvent.setup();
const series: MockSeries[] = [
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
];
const plot = createMockPlot(series);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-toggle"
shouldSaveSelectionPreference
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('toggle-on-off-1'));
expect(plot.setSeries).toHaveBeenCalledWith(1, { show: false });
expect(mockUpdateSeriesVisibilityToLocalStorage).toHaveBeenCalledTimes(1);
expect(mockUpdateSeriesVisibilityToLocalStorage).toHaveBeenCalledWith(
'widget-toggle',
expect.any(Array),
);
});
it('does not toggle when target series does not exist', async () => {
const user = userEvent.setup();
const series: MockSeries[] = [{ label: 'x-axis', show: true }];
const plot = createMockPlot(series);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-missing-series"
shouldSaveSelectionPreference
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('toggle-on-off-5'));
expect(plot.setSeries).not.toHaveBeenCalled();
expect(mockUpdateSeriesVisibilityToLocalStorage).not.toHaveBeenCalled();
});
it('does not persist visibility when preferences flag is disabled', async () => {
const user = userEvent.setup();
const series: MockSeries[] = [
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
];
const plot = createMockPlot(series);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-no-persist"
shouldSaveSelectionPreference={false}
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('toggle-on-off-1'));
expect(plot.setSeries).toHaveBeenCalledWith(1, { show: false });
expect(mockUpdateSeriesVisibilityToLocalStorage).not.toHaveBeenCalled();
});
});
describe('onFocusSeries', () => {
it('does nothing when plot instance is not set', async () => {
const user = userEvent.setup();
render(
<PlotContextProvider>
<TestComponent />
</PlotContextProvider>,
);
await user.click(screen.getByTestId('focus-series'));
});
it('sets focus on the given series index', async () => {
const user = userEvent.setup();
const plot = createMockPlot([
{ label: 'x-axis', show: true },
{ label: 'CPU', show: true },
]);
render(
<PlotContextProvider>
<TestComponent
plot={plot}
widgetId="widget-focus"
shouldSaveSelectionPreference={false}
/>
</PlotContextProvider>,
);
await user.click(screen.getByTestId('init'));
await user.click(screen.getByTestId('focus-series'));
expect(plot.setSeries).toHaveBeenCalledWith(1, { focus: true }, false);
});
});
});
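The onToggleSeriesVisibility tests above describe a highlight/reset cycle: the first toggle shows only the clicked series, and toggling the same index again restores every series, always skipping index 0 (the x-axis series). A sketch of that cycle with illustrative names; the real provider presumably tracks the last toggled index internally:

import type uPlot from 'uplot';

function toggleSeriesVisibility(
  plot: uPlot,
  index: number,
  lastSoloIndex: number | null,
): number | null {
  const resetAll = lastSoloIndex === index;
  plot.batch(() => {
    plot.series.forEach((_, i) => {
      if (i === 0) return; // skip the timestamp series
      plot.setSeries(i, { show: resetAll ? true : i === index });
    });
  });
  // Return the new solo index so the caller can detect the reset case next time
  return resetAll ? null : index;
}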

View File

@@ -1,201 +0,0 @@
import { renderHook } from '@testing-library/react';
import { usePlotContext } from 'lib/uPlotV2/context/PlotContext';
import { useLegendActions } from 'lib/uPlotV2/hooks/useLegendActions';
jest.mock('lib/uPlotV2/context/PlotContext');
const mockUsePlotContext = usePlotContext as jest.MockedFunction<
typeof usePlotContext
>;
describe('useLegendActions', () => {
let onToggleSeriesVisibility: jest.Mock;
let onToggleSeriesOnOff: jest.Mock;
let onFocusSeriesPlot: jest.Mock;
let setPlotContextInitialState: jest.Mock;
let syncSeriesVisibilityToLocalStorage: jest.Mock;
let setFocusedSeriesIndexMock: jest.Mock;
let cancelAnimationFrameSpy: jest.SpyInstance<void, [handle: number]>;
beforeAll(() => {
jest
.spyOn(global, 'requestAnimationFrame')
.mockImplementation((cb: FrameRequestCallback): number => {
cb(0);
return 1;
});
cancelAnimationFrameSpy = jest
.spyOn(global, 'cancelAnimationFrame')
.mockImplementation(() => {});
});
afterAll(() => {
jest.restoreAllMocks();
});
beforeEach(() => {
onToggleSeriesVisibility = jest.fn();
onToggleSeriesOnOff = jest.fn();
onFocusSeriesPlot = jest.fn();
setPlotContextInitialState = jest.fn();
syncSeriesVisibilityToLocalStorage = jest.fn();
setFocusedSeriesIndexMock = jest.fn();
mockUsePlotContext.mockReturnValue({
onToggleSeriesVisibility,
onToggleSeriesOnOff,
onFocusSeries: onFocusSeriesPlot,
setPlotContextInitialState,
syncSeriesVisibilityToLocalStorage,
});
cancelAnimationFrameSpy.mockClear();
});
const createMouseEvent = (options: {
legendItemId?: number;
isMarker?: boolean;
}): any => {
const { legendItemId, isMarker = false } = options;
return {
target: {
dataset: {
...(isMarker ? { isLegendMarker: 'true' } : {}),
},
closest: jest.fn(() =>
legendItemId !== undefined
? { dataset: { legendItemId: String(legendItemId) } }
: null,
),
},
};
};
describe('onLegendClick', () => {
it('toggles series visibility when clicking on legend label', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: null,
}),
);
result.current.onLegendClick(createMouseEvent({ legendItemId: 0 }));
expect(onToggleSeriesVisibility).toHaveBeenCalledTimes(1);
expect(onToggleSeriesVisibility).toHaveBeenCalledWith(0);
expect(onToggleSeriesOnOff).not.toHaveBeenCalled();
});
it('toggles series on/off when clicking on marker', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: null,
}),
);
result.current.onLegendClick(
createMouseEvent({ legendItemId: 0, isMarker: true }),
);
expect(onToggleSeriesOnOff).toHaveBeenCalledTimes(1);
expect(onToggleSeriesOnOff).toHaveBeenCalledWith(0);
expect(onToggleSeriesVisibility).not.toHaveBeenCalled();
});
it('does nothing when click target is not inside a legend item', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: null,
}),
);
result.current.onLegendClick(createMouseEvent({}));
expect(onToggleSeriesOnOff).not.toHaveBeenCalled();
expect(onToggleSeriesVisibility).not.toHaveBeenCalled();
});
});
describe('onFocusSeries', () => {
it('schedules focus update and calls plot focus handler via mouse move', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: null,
}),
);
result.current.onLegendMouseMove(createMouseEvent({ legendItemId: 0 }));
expect(setFocusedSeriesIndexMock).toHaveBeenCalledWith(0);
expect(onFocusSeriesPlot).toHaveBeenCalledWith(0);
});
it('cancels previous animation frame before scheduling new one on subsequent mouse moves', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: null,
}),
);
result.current.onLegendMouseMove(createMouseEvent({ legendItemId: 0 }));
result.current.onLegendMouseMove(createMouseEvent({ legendItemId: 1 }));
expect(cancelAnimationFrameSpy).toHaveBeenCalled();
});
});
describe('onLegendMouseMove', () => {
it('focuses new series when hovering over different legend item', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: 0,
}),
);
result.current.onLegendMouseMove(createMouseEvent({ legendItemId: 1 }));
expect(setFocusedSeriesIndexMock).toHaveBeenCalledWith(1);
expect(onFocusSeriesPlot).toHaveBeenCalledWith(1);
});
it('does nothing when hovering over already focused series', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: 1,
}),
);
result.current.onLegendMouseMove(createMouseEvent({ legendItemId: 1 }));
expect(setFocusedSeriesIndexMock).not.toHaveBeenCalled();
expect(onFocusSeriesPlot).not.toHaveBeenCalled();
});
});
describe('onLegendMouseLeave', () => {
it('cancels pending animation frame and clears focus state', async () => {
const { result } = renderHook(() =>
useLegendActions({
setFocusedSeriesIndex: setFocusedSeriesIndexMock,
focusedSeriesIndex: null,
}),
);
result.current.onLegendMouseMove(createMouseEvent({ legendItemId: 0 }));
result.current.onLegendMouseLeave();
expect(cancelAnimationFrameSpy).toHaveBeenCalled();
expect(setFocusedSeriesIndexMock).toHaveBeenCalledWith(null);
expect(onFocusSeriesPlot).toHaveBeenCalledWith(null);
});
});
});
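The mocked events above imply a DOM-delegation scheme: the handler resolves the legend item from the closest ancestor carrying a legend-item id and branches on whether the click landed on the colour marker. A sketch of that lookup; the data-attribute names are inferred from the mocks (dataset.legendItemId, dataset.isLegendMarker), not confirmed against the hook's source:

function readLegendTarget(
  e: MouseEvent,
): { seriesIndex: number; isMarker: boolean } | null {
  const target = e.target as HTMLElement;
  // dataset.legendItemId corresponds to a data-legend-item-id attribute
  const item = target.closest<HTMLElement>('[data-legend-item-id]');
  if (!item?.dataset.legendItemId) {
    return null;
  }
  return {
    seriesIndex: Number(item.dataset.legendItemId),
    isMarker: target.dataset.isLegendMarker === 'true',
  };
}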

View File

@@ -1,192 +0,0 @@
import { act, cleanup, renderHook } from '@testing-library/react';
import type { LegendItem } from 'lib/uPlotV2/config/types';
import type { UPlotConfigBuilder } from 'lib/uPlotV2/config/UPlotConfigBuilder';
import useLegendsSync from 'lib/uPlotV2/hooks/useLegendsSync';
import type uPlot from 'uplot';
describe('useLegendsSync', () => {
let requestAnimationFrameSpy: jest.SpyInstance<
number,
[callback: FrameRequestCallback]
>;
let cancelAnimationFrameSpy: jest.SpyInstance<void, [handle: number]>;
beforeAll(() => {
requestAnimationFrameSpy = jest
.spyOn(global, 'requestAnimationFrame')
.mockImplementation((cb: FrameRequestCallback): number => {
cb(0);
return 1;
});
cancelAnimationFrameSpy = jest
.spyOn(global, 'cancelAnimationFrame')
.mockImplementation(() => {});
});
afterEach(() => {
jest.clearAllMocks();
cleanup();
});
afterAll(() => {
jest.restoreAllMocks();
});
const createMockConfig = (
legendItems: Record<number, LegendItem>,
): {
config: UPlotConfigBuilder;
invokeSetSeries: (
seriesIndex: number | null,
opts: { show?: boolean; focus?: boolean },
) => void;
} => {
let setSeriesHandler:
| ((u: uPlot, seriesIndex: number | null, opts: uPlot.Series) => void)
| null = null;
const config = ({
getLegendItems: jest.fn(() => legendItems),
addHook: jest.fn(
(
hookName: string,
handler: (
u: uPlot,
seriesIndex: number | null,
opts: uPlot.Series,
) => void,
) => {
if (hookName === 'setSeries') {
setSeriesHandler = handler;
}
return (): void => {
setSeriesHandler = null;
};
},
),
} as unknown) as UPlotConfigBuilder;
const invokeSetSeries = (
seriesIndex: number | null,
opts: { show?: boolean; focus?: boolean },
): void => {
if (setSeriesHandler) {
setSeriesHandler({} as uPlot, seriesIndex, { ...opts });
}
};
return { config, invokeSetSeries };
};
it('initializes legend items from config', () => {
const initialItems: Record<number, LegendItem> = {
1: { seriesIndex: 1, label: 'CPU', show: true, color: '#f00' },
2: { seriesIndex: 2, label: 'Memory', show: false, color: '#0f0' },
};
const { config } = createMockConfig(initialItems);
const { result } = renderHook(() => useLegendsSync({ config }));
expect(config.getLegendItems).toHaveBeenCalledTimes(1);
expect(config.addHook).toHaveBeenCalledWith(
'setSeries',
expect.any(Function),
);
expect(result.current.legendItemsMap).toEqual(initialItems);
});
it('updates focusedSeriesIndex when a series gains focus via setSeries by default', async () => {
const initialItems: Record<number, LegendItem> = {
1: { seriesIndex: 1, label: 'CPU', show: true, color: '#f00' },
};
const { config, invokeSetSeries } = createMockConfig(initialItems);
const { result } = renderHook(() => useLegendsSync({ config }));
expect(result.current.focusedSeriesIndex).toBeNull();
await act(async () => {
invokeSetSeries(1, { focus: true });
});
expect(result.current.focusedSeriesIndex).toBe(1);
});
it('does not update focusedSeriesIndex when subscribeToFocusChange is false', () => {
const initialItems: Record<number, LegendItem> = {
1: { seriesIndex: 1, label: 'CPU', show: true, color: '#f00' },
};
const { config, invokeSetSeries } = createMockConfig(initialItems);
const { result } = renderHook(() =>
useLegendsSync({ config, subscribeToFocusChange: false }),
);
invokeSetSeries(1, { focus: true });
expect(result.current.focusedSeriesIndex).toBeNull();
});
it('updates legendItemsMap visibility when show changes for a series', async () => {
const initialItems: Record<number, LegendItem> = {
0: { seriesIndex: 0, label: 'x-axis', show: true, color: '#000' },
1: { seriesIndex: 1, label: 'CPU', show: true, color: '#f00' },
};
const { config, invokeSetSeries } = createMockConfig(initialItems);
const { result } = renderHook(() => useLegendsSync({ config }));
// Toggle visibility of series 1
await act(async () => {
invokeSetSeries(1, { show: false });
});
expect(result.current.legendItemsMap[1].show).toBe(false);
});
it('ignores visibility updates for unknown legend items or unchanged show values', () => {
const initialItems: Record<number, LegendItem> = {
1: { seriesIndex: 1, label: 'CPU', show: true, color: '#f00' },
};
const { config, invokeSetSeries } = createMockConfig(initialItems);
const { result } = renderHook(() => useLegendsSync({ config }));
const before = result.current.legendItemsMap;
// Unknown series index
invokeSetSeries(5, { show: false });
// Unchanged visibility for existing item
invokeSetSeries(1, { show: true });
const after = result.current.legendItemsMap;
expect(after).toEqual(before);
});
it('cancels pending visibility RAF on unmount', () => {
const initialItems: Record<number, LegendItem> = {
1: { seriesIndex: 1, label: 'CPU', show: true, color: '#f00' },
};
const { config, invokeSetSeries } = createMockConfig(initialItems);
// Override RAF to not immediately invoke callback so we can assert cancellation
requestAnimationFrameSpy.mockImplementationOnce(() => 42);
const { unmount } = renderHook(() => useLegendsSync({ config }));
invokeSetSeries(1, { show: false });
unmount();
expect(cancelAnimationFrameSpy).toHaveBeenCalledWith(42);
});
});
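Outside of React, the subscription these tests cover boils down to: register a setSeries hook through the builder, defer show-flag updates to a requestAnimationFrame, and cancel any pending frame on teardown. A non-React sketch under those assumptions (the real hook stores the legend items in component state):

import type { UPlotConfigBuilder } from 'lib/uPlotV2/config/UPlotConfigBuilder';
import type uPlot from 'uplot';

function subscribeToVisibility(
  config: UPlotConfigBuilder,
  onShowChange: (seriesIndex: number, show: boolean) => void,
): () => void {
  let rafId: number | null = null;
  const removeHook = config.addHook('setSeries', ((
    _u: uPlot,
    seriesIndex: number | null,
    opts: uPlot.Series,
  ): void => {
    if (seriesIndex === null || opts.show === undefined) {
      return;
    }
    const { show } = opts;
    if (rafId !== null) {
      cancelAnimationFrame(rafId); // drop any update that is still pending
    }
    rafId = requestAnimationFrame(() => onShowChange(seriesIndex, show));
  }) as uPlot.Hooks.Defs['setSeries']);
  return (): void => {
    if (rafId !== null) {
      cancelAnimationFrame(rafId); // mirrors the unmount test above
    }
    removeHook();
  };
}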

View File

@@ -1,218 +0,0 @@
import type uPlot from 'uplot';
import { Axis } from 'uplot';
import {
buildYAxisSizeCalculator,
calculateTextWidth,
getExistingAxisSize,
} from '../axis';
describe('axis utils', () => {
describe('calculateTextWidth', () => {
it('returns 0 when values are undefined or empty', () => {
const mockSelf = ({
ctx: {
measureText: jest.fn(),
font: '',
},
} as unknown) as uPlot;
// uPlot declares Axis.font as a string, but at runtime it holds an array of strings
const mockAxis: Axis = { font: (['12px sans-serif'] as unknown) as string };
expect(calculateTextWidth(mockSelf, mockAxis, undefined)).toBe(0);
expect(calculateTextWidth(mockSelf, mockAxis, [])).toBe(0);
});
it('returns 0 when longest value is empty string or axis has no usable font', () => {
const mockSelf = ({
ctx: {
measureText: jest.fn(),
font: '',
},
} as unknown) as uPlot;
const axisWithoutFont: Axis = { font: '' };
const axisWithEmptyFontArray: Axis = { font: ([] as unknown) as string };
expect(calculateTextWidth(mockSelf, axisWithoutFont, [''])).toBe(0);
expect(
calculateTextWidth(mockSelf, axisWithEmptyFontArray, ['a', 'bb']),
).toBe(0);
});
it('measures longest value using canvas context and axis font', () => {
const measureText = jest.fn(() => ({ width: 100 }));
const mockSelf = ({
ctx: {
font: '',
measureText,
},
} as unknown) as uPlot;
const mockAxis: Axis = { font: (['14px Arial'] as unknown) as string };
const values = ['1', '1234', '12'];
const dpr =
((global as unknown) as { devicePixelRatio?: number }).devicePixelRatio ??
1;
const result = calculateTextWidth(mockSelf, mockAxis, values);
expect(measureText).toHaveBeenCalledWith('1234');
expect(mockSelf.ctx.font).toBe('14px Arial');
expect(result).toBe(100 / dpr);
});
});
describe('getExistingAxisSize', () => {
it('returns internal _size when present', () => {
const axis: any = {
_size: 42,
size: 10,
};
const result = getExistingAxisSize({
uplotInstance: ({} as unknown) as uPlot,
axis,
axisIdx: 0,
cycleNum: 0,
});
expect(result).toBe(42);
});
it('invokes size function when _size is not set', () => {
const sizeFn = jest.fn(() => 24);
const axis: Axis = { size: sizeFn };
const instance = ({} as unknown) as uPlot;
const result = getExistingAxisSize({
uplotInstance: instance,
axis,
values: ['10', '20'],
axisIdx: 1,
cycleNum: 2,
});
expect(sizeFn).toHaveBeenCalledWith(instance, ['10', '20'], 1, 2);
expect(result).toBe(24);
});
it('returns numeric size or 0 when neither _size nor size are provided', () => {
const axisWithSize: Axis = { size: 16 };
const axisWithoutSize: Axis = {};
const instance = ({} as unknown) as uPlot;
expect(
getExistingAxisSize({
uplotInstance: instance,
axis: axisWithSize,
axisIdx: 0,
cycleNum: 0,
}),
).toBe(16);
expect(
getExistingAxisSize({
uplotInstance: instance,
axis: axisWithoutSize,
axisIdx: 0,
cycleNum: 0,
}),
).toBe(0);
});
});
describe('buildYAxisSizeCalculator', () => {
it('delegates to getExistingAxisSize when cycleNum > 1', () => {
const sizeCalculator = buildYAxisSizeCalculator(5);
const axis: any = {
_size: 80,
ticks: { size: 10 },
font: ['12px sans-serif'],
};
const measureText = jest.fn(() => ({ width: 60 }));
const self = ({
axes: [axis],
ctx: {
font: '',
measureText,
},
} as unknown) as uPlot;
if (typeof sizeCalculator === 'number') {
throw new Error('Size calculator is a number');
}
const result = sizeCalculator(self, ['10', '20'], 0, 2);
expect(result).toBe(80);
expect(measureText).not.toHaveBeenCalled();
});
it('computes size from ticks, gap and text width when cycleNum <= 1', () => {
const gap = 7;
const sizeCalculator = buildYAxisSizeCalculator(gap);
const axis: Axis = {
ticks: { size: 12 },
font: (['12px sans-serif'] as unknown) as string,
};
const measureText = jest.fn(() => ({ width: 50 }));
const self = ({
axes: [axis],
ctx: {
font: '',
measureText,
},
} as unknown) as uPlot;
const dpr =
((global as unknown) as { devicePixelRatio?: number }).devicePixelRatio ??
1;
const expected = Math.ceil(12 + gap + 50 / dpr);
if (typeof sizeCalculator === 'number') {
throw new Error('Size calculator is a number');
}
const result = sizeCalculator(self, ['short', 'the-longest'], 0, 0);
expect(measureText).toHaveBeenCalledWith('the-longest');
expect(result).toBe(expected);
});
it('uses 0 ticks size when ticks are not defined', () => {
const gap = 4;
const sizeCalculator = buildYAxisSizeCalculator(gap);
const axis: Axis = {
font: (['12px sans-serif'] as unknown) as string,
};
const measureText = jest.fn(() => ({ width: 40 }));
const self = ({
axes: [axis],
ctx: {
font: '',
measureText,
},
} as unknown) as uPlot;
const dpr =
((global as unknown) as { devicePixelRatio?: number }).devicePixelRatio ??
1;
const expected = Math.ceil(gap + 40 / dpr);
if (typeof sizeCalculator === 'number') {
throw new Error('Size calculator is a number');
}
const result = sizeCalculator(self, ['1', '123'], 0, 1);
expect(result).toBe(expected);
});
});
});

View File

@@ -1,80 +0,0 @@
import type uPlot from 'uplot';
import { Axis } from 'uplot';
/**
* Calculate text width for longest value
*/
export function calculateTextWidth(
self: uPlot,
axis: Axis,
values: string[] | undefined,
): number {
if (!values || values.length === 0) {
return 0;
}
// Find longest value
const longestVal = values.reduce(
(acc, val) => (val.length > acc.length ? val : acc),
'',
);
if (longestVal === '' || !axis.font?.[0]) {
return 0;
}
self.ctx.font = axis.font[0];
return self.ctx.measureText(longestVal).width / devicePixelRatio;
}
export function getExistingAxisSize({
uplotInstance,
axis,
values,
axisIdx,
cycleNum,
}: {
uplotInstance: uPlot;
axis: Axis;
values?: string[];
axisIdx: number;
cycleNum: number;
}): number {
const internalSize = (axis as { _size?: number })._size;
if (internalSize !== undefined) {
return internalSize;
}
const existingSize = axis.size;
if (typeof existingSize === 'function') {
return existingSize(uplotInstance, values ?? [], axisIdx, cycleNum);
}
return existingSize ?? 0;
}
export function buildYAxisSizeCalculator(gap: number): uPlot.Axis.Size {
return (
self: uPlot,
values: string[] | undefined,
axisIdx: number,
cycleNum: number,
): number => {
const axis = self.axes[axisIdx];
// Bail out, force convergence
if (cycleNum > 1) {
return getExistingAxisSize({
uplotInstance: self,
axis,
values,
axisIdx,
cycleNum,
});
}
let axisSize = (axis.ticks?.size ?? 0) + gap;
axisSize += calculateTextWidth(self, axis, values);
return Math.ceil(axisSize);
};
}
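A usage sketch (illustrative values, not taken from this diff) showing how the calculator slots into a uPlot axis definition so the y-axis width tracks the widest tick label:

const yAxis: uPlot.Axis = {
  scale: 'y',
  // 8px gap between the tick marks and the labels
  size: buildYAxisSizeCalculator(8),
};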

View File

@@ -1,25 +1,11 @@
-import { SeriesVisibilityState } from 'container/DashboardContainer/visualization/panels/types';
-export function resolveSeriesVisibility({
-seriesIndex,
-seriesShow,
-seriesLabel,
-seriesVisibilityState,
-isAnySeriesHidden,
-}: {
-seriesIndex: number;
-seriesShow: boolean | undefined | null;
-seriesLabel: string;
-seriesVisibilityState: SeriesVisibilityState | null;
-isAnySeriesHidden: boolean;
-}): boolean {
-if (
-isAnySeriesHidden &&
-seriesVisibilityState?.visibility &&
-seriesVisibilityState.labels.length > seriesIndex &&
-seriesVisibilityState.labels[seriesIndex] === seriesLabel
-) {
-return seriesVisibilityState.visibility[seriesIndex] ?? false;
+export function resolveSeriesVisibility(
+label: string,
+seriesShow: boolean | undefined | null,
+visibilityMap: Map<string, boolean> | null,
+isAnySeriesHidden: boolean,
+): boolean {
+if (isAnySeriesHidden) {
+return visibilityMap?.get(label) ?? false;
}
return seriesShow ?? true;
}
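The new signature moves the label → visibility lookup to the caller. A hypothetical adapter, assuming callers derive the map from the stored parallel labels/visibility arrays seen elsewhere in this diff:

function toVisibilityMap(
  labels: string[],
  visibility: boolean[],
): Map<string, boolean> {
  // labels[i] pairs with visibility[i]; missing entries default to hidden
  return new Map(
    labels.map((label, i): [string, boolean] => [label, visibility[i] ?? false]),
  );
}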

View File

@@ -0,0 +1,44 @@
package middleware
import (
"log/slog"
"net/http"
"runtime/debug"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
)
// Recovery is a middleware that recovers from panics, logs the panic,
// and returns a 500 Internal Server Error.
type Recovery struct {
logger *slog.Logger
}
// NewRecovery creates a new Recovery middleware.
func NewRecovery(logger *slog.Logger) Wrapper {
return &Recovery{
logger: logger.With("pkg", "http-middleware-recovery"),
}
}
// Wrap is the middleware handler.
func (m *Recovery) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
m.logger.ErrorContext(
r.Context(),
"panic recovered",
"err", err, "stack", string(debug.Stack()),
)
render.Error(w, errors.NewInternalf(
errors.CodeInternal, "internal server error",
))
}
}()
next.ServeHTTP(w, r)
})
}

View File

@@ -13,6 +13,7 @@ import (
root "github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
)
@@ -462,7 +463,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
-if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
+if slices.Contains(integrationstypes.IntegrationUserEmails, createdByUser.Email) {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
return
}
@@ -507,7 +508,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
-if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
+if slices.Contains(integrationstypes.IntegrationUserEmails, createdByUser.Email) {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
return
}

View File

@@ -19,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/emailtypes"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/dustin/go-humanize"
@@ -171,7 +172,7 @@ func (m *Module) DeleteInvite(ctx context.Context, orgID string, id valuer.UUID)
func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
createUserOpts := root.NewCreateUserOptions(opts...)
-// since assign is idempotant multiple calls to assign won't cause issues in case of retries.
+// since assign is idempotent multiple calls to assign won't cause issues in case of retries.
err := module.authz.Grant(ctx, input.OrgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role), authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
if err != nil {
return err
@@ -286,7 +287,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return err
}
-if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
+if slices.Contains(integrationstypes.IntegrationUserEmails, user.Email) {
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
}
@@ -300,7 +301,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot delete the last admin")
}
-// since revoke is idempotant multiple calls to revoke won't cause issues in case of retries
+// since revoke is idempotent multiple calls to revoke won't cause issues in case of retries
err = module.authz.Revoke(ctx, orgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role), authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
if err != nil {
return err

View File

@@ -1,624 +0,0 @@
package cloudintegrations
import (
"context"
"fmt"
"net/url"
"slices"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/maps"
)
var SupportedCloudProviders = []string{
"aws",
}
func validateCloudProviderName(name string) *model.ApiError {
if !slices.Contains(SupportedCloudProviders, name) {
return model.BadRequest(fmt.Errorf("invalid cloud provider: %s", name))
}
return nil
}
type Controller struct {
accountsRepo cloudProviderAccountsRepository
serviceConfigRepo ServiceConfigDatabase
}
func NewController(sqlStore sqlstore.SQLStore) (*Controller, error) {
accountsRepo, err := newCloudProviderAccountsRepository(sqlStore)
if err != nil {
return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
}
serviceConfigRepo, err := newServiceConfigRepository(sqlStore)
if err != nil {
return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
}
return &Controller{
accountsRepo: accountsRepo,
serviceConfigRepo: serviceConfigRepo,
}, nil
}
type ConnectedAccountsListResponse struct {
Accounts []types.Account `json:"accounts"`
}
func (c *Controller) ListConnectedAccounts(ctx context.Context, orgId string, cloudProvider string) (
*ConnectedAccountsListResponse, *model.ApiError,
) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgId, cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
}
connectedAccounts := []types.Account{}
for _, a := range accountRecords {
connectedAccounts = append(connectedAccounts, a.Account())
}
return &ConnectedAccountsListResponse{
Accounts: connectedAccounts,
}, nil
}
type GenerateConnectionUrlRequest struct {
// Optional. To be specified for updates.
AccountId *string `json:"account_id,omitempty"`
AccountConfig types.AccountConfig `json:"account_config"`
AgentConfig SigNozAgentConfig `json:"agent_config"`
}
type SigNozAgentConfig struct {
// The region in which SigNoz agent should be installed.
Region string `json:"region"`
IngestionUrl string `json:"ingestion_url"`
IngestionKey string `json:"ingestion_key"`
SigNozAPIUrl string `json:"signoz_api_url"`
SigNozAPIKey string `json:"signoz_api_key"`
Version string `json:"version,omitempty"`
}
type GenerateConnectionUrlResponse struct {
AccountId string `json:"account_id"`
ConnectionUrl string `json:"connection_url"`
}
func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cloudProvider string, req GenerateConnectionUrlRequest) (*GenerateConnectionUrlResponse, *model.ApiError) {
// Account connection with a simple connection URL may not be available for all providers.
if cloudProvider != "aws" {
return nil, model.BadRequest(fmt.Errorf("unsupported cloud provider: %s", cloudProvider))
}
account, apiErr := c.accountsRepo.upsert(
ctx, orgId, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
agentVersion := "v0.0.8"
if req.AgentConfig.Version != "" {
agentVersion = req.AgentConfig.Version
}
connectionUrl := fmt.Sprintf(
"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?",
req.AgentConfig.Region, req.AgentConfig.Region,
)
for qp, value := range map[string]string{
"param_SigNozIntegrationAgentVersion": agentVersion,
"param_SigNozApiUrl": req.AgentConfig.SigNozAPIUrl,
"param_SigNozApiKey": req.AgentConfig.SigNozAPIKey,
"param_SigNozAccountId": account.ID.StringValue(),
"param_IngestionUrl": req.AgentConfig.IngestionUrl,
"param_IngestionKey": req.AgentConfig.IngestionKey,
"stackName": "signoz-integration",
"templateURL": fmt.Sprintf(
"https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json",
agentVersion,
),
} {
connectionUrl += fmt.Sprintf("&%s=%s", qp, url.QueryEscape(value))
}
return &GenerateConnectionUrlResponse{
AccountId: account.ID.StringValue(),
ConnectionUrl: connectionUrl,
}, nil
}
type AccountStatusResponse struct {
Id string `json:"id"`
CloudAccountId *string `json:"cloud_account_id,omitempty"`
Status types.AccountStatus `json:"status"`
}
func (c *Controller) GetAccountStatus(ctx context.Context, orgId string, cloudProvider string, accountId string) (
*AccountStatusResponse, *model.ApiError,
) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
if apiErr != nil {
return nil, apiErr
}
resp := AccountStatusResponse{
Id: account.ID.StringValue(),
CloudAccountId: account.AccountID,
Status: account.Status(),
}
return &resp, nil
}
type AgentCheckInRequest struct {
ID string `json:"account_id"`
AccountID string `json:"cloud_account_id"`
// Arbitrary cloud specific Agent data
Data map[string]any `json:"data,omitempty"`
}
type AgentCheckInResponse struct {
AccountId string `json:"account_id"`
CloudAccountId string `json:"cloud_account_id"`
RemovedAt *time.Time `json:"removed_at"`
IntegrationConfig IntegrationConfigForAgent `json:"integration_config"`
}
type IntegrationConfigForAgent struct {
EnabledRegions []string `json:"enabled_regions"`
TelemetryCollectionStrategy *CompiledCollectionStrategy `json:"telemetry,omitempty"`
}
func (c *Controller) CheckInAsAgent(ctx context.Context, orgId string, cloudProvider string, req AgentCheckInRequest) (*AgentCheckInResponse, error) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in with new %s account id %s for account %s with existing %s id %s",
cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
))
}
existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in to %s account %s with id %s. already connected with id %s",
cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
))
}
agentReport := types.AgentReport{
TimestampMillis: time.Now().UnixMilli(),
Data: req.Data,
}
account, apiErr := c.accountsRepo.upsert(
ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
// prepare and return integration config to be consumed by agent
compiledStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't init telemetry collection strategy: %w", err,
))
}
agentConfig := IntegrationConfigForAgent{
EnabledRegions: []string{},
TelemetryCollectionStrategy: compiledStrategy,
}
if account.Config != nil && account.Config.EnabledRegions != nil {
agentConfig.EnabledRegions = account.Config.EnabledRegions
}
services, err := services.Map(cloudProvider)
if err != nil {
return nil, err
}
svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
ctx, orgId, account.ID.StringValue(),
)
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, "couldn't get service configs for cloud account",
)
}
// accumulate config in a fixed order to ensure same config generated across runs
configuredServices := maps.Keys(svcConfigs)
slices.Sort(configuredServices)
for _, svcType := range configuredServices {
definition, ok := services[svcType]
if !ok {
continue
}
config := svcConfigs[svcType]
err := AddServiceStrategy(svcType, compiledStrategy, definition.Strategy, config)
if err != nil {
return nil, err
}
}
return &AgentCheckInResponse{
AccountId: account.ID.StringValue(),
CloudAccountId: *account.AccountID,
RemovedAt: account.RemovedAt,
IntegrationConfig: agentConfig,
}, nil
}
type UpdateAccountConfigRequest struct {
Config types.AccountConfig `json:"config"`
}
func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, cloudProvider string, accountId string, req UpdateAccountConfigRequest) (*types.Account, *model.ApiError) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
accountRecord, apiErr := c.accountsRepo.upsert(
ctx, orgId, cloudProvider, &accountId, &req.Config, nil, nil, nil,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
account := accountRecord.Account()
return &account, nil
}
func (c *Controller) DisconnectAccount(ctx context.Context, orgId string, cloudProvider string, accountId string) (*types.CloudIntegration, *model.ApiError) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
}
tsNow := time.Now()
account, apiErr = c.accountsRepo.upsert(
ctx, orgId, cloudProvider, &accountId, nil, nil, nil, &tsNow,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
}
return account, nil
}
type ListServicesResponse struct {
Services []ServiceSummary `json:"services"`
}
func (c *Controller) ListServices(
ctx context.Context,
orgID string,
cloudProvider string,
cloudAccountId *string,
) (*ListServicesResponse, *model.ApiError) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
definitions, apiErr := services.List(cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
}
svcConfigs := map[string]*types.CloudServiceConfig{}
if cloudAccountId != nil {
activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
ctx, orgID, cloudProvider, *cloudAccountId,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't get active account")
}
svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
ctx, orgID, activeAccount.ID.StringValue(),
)
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, "couldn't get service configs for cloud account",
)
}
}
summaries := []ServiceSummary{}
for _, def := range definitions {
summary := ServiceSummary{
Metadata: def.Metadata,
}
summary.Config = svcConfigs[summary.Id]
summaries = append(summaries, summary)
}
return &ListServicesResponse{
Services: summaries,
}, nil
}
func (c *Controller) GetServiceDetails(
ctx context.Context,
orgID string,
cloudProvider string,
serviceId string,
cloudAccountId *string,
) (*ServiceDetails, error) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
definition, err := services.GetServiceDefinition(cloudProvider, serviceId)
if err != nil {
return nil, err
}
details := ServiceDetails{
Definition: *definition,
}
if cloudAccountId != nil {
activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
ctx, orgID, cloudProvider, *cloudAccountId,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't get active account")
}
config, apiErr := c.serviceConfigRepo.get(
ctx, orgID, activeAccount.ID.StringValue(), serviceId,
)
if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
}
if config != nil {
details.Config = config
enabled := false
if config.Metrics != nil && config.Metrics.Enabled {
enabled = true
}
// add links to service dashboards, making them clickable.
for i, d := range definition.Assets.Dashboards {
dashboardUuid := c.dashboardUuid(
cloudProvider, serviceId, d.Id,
)
if enabled {
definition.Assets.Dashboards[i].Url = fmt.Sprintf("/dashboard/%s", dashboardUuid)
} else {
definition.Assets.Dashboards[i].Url = "" // to unset the in-memory URL if enabled once and disabled afterwards
}
}
}
}
return &details, nil
}
type UpdateServiceConfigRequest struct {
CloudAccountId string `json:"cloud_account_id"`
Config types.CloudServiceConfig `json:"config"`
}
func (u *UpdateServiceConfigRequest) Validate(def *services.Definition) error {
if def.Id != services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", services.S3Sync)
} else if def.Id == services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
for region := range u.Config.Logs.S3Buckets {
if _, found := ValidAWSRegions[region]; !found {
return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
}
}
}
return nil
}
type UpdateServiceConfigResponse struct {
Id string `json:"id"`
Config types.CloudServiceConfig `json:"config"`
}
func (c *Controller) UpdateServiceConfig(
ctx context.Context,
orgID string,
cloudProvider string,
serviceType string,
req *UpdateServiceConfigRequest,
) (*UpdateServiceConfigResponse, error) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
// can only update config for a valid service.
definition, err := services.GetServiceDefinition(cloudProvider, serviceType)
if err != nil {
return nil, err
}
if err := req.Validate(definition); err != nil {
return nil, err
}
// can only update config for a connected cloud account id
_, apiErr := c.accountsRepo.getConnectedCloudAccount(
ctx, orgID, cloudProvider, req.CloudAccountId,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
}
updatedConfig, apiErr := c.serviceConfigRepo.upsert(
ctx, orgID, cloudProvider, req.CloudAccountId, serviceType, req.Config,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't update service config")
}
return &UpdateServiceConfigResponse{
Id: serviceType,
Config: *updatedConfig,
}, nil
}
// All dashboards that are available based on cloud integrations configuration
// across all cloud providers
func (c *Controller) AvailableDashboards(ctx context.Context, orgId valuer.UUID) ([]*dashboardtypes.Dashboard, *model.ApiError) {
allDashboards := []*dashboardtypes.Dashboard{}
for _, provider := range []string{"aws"} {
providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, provider)
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, fmt.Sprintf("couldn't get available dashboards for %s", provider),
)
}
allDashboards = append(allDashboards, providerDashboards...)
}
return allDashboards, nil
}
func (c *Controller) AvailableDashboardsForCloudProvider(ctx context.Context, orgID valuer.UUID, cloudProvider string) ([]*dashboardtypes.Dashboard, *model.ApiError) {
accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgID.StringValue(), cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list connected cloud accounts")
}
// for v0, service dashboards are only available when metrics are enabled.
servicesWithAvailableMetrics := map[string]*time.Time{}
for _, ar := range accountRecords {
if ar.AccountID != nil {
configsBySvcId, apiErr := c.serviceConfigRepo.getAllForAccount(
ctx, orgID.StringValue(), ar.ID.StringValue(),
)
if apiErr != nil {
return nil, apiErr
}
for svcId, config := range configsBySvcId {
if config.Metrics != nil && config.Metrics.Enabled {
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
}
}
}
}
allServices, apiErr := services.List(cloudProvider)
if apiErr != nil {
return nil, apiErr
}
svcDashboards := []*dashboardtypes.Dashboard{}
for _, svc := range allServices {
serviceDashboardsCreatedAt := servicesWithAvailableMetrics[svc.Id]
if serviceDashboardsCreatedAt != nil {
for _, d := range svc.Assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
svcDashboards = append(svcDashboards, &dashboardtypes.Dashboard{
ID: c.dashboardUuid(cloudProvider, svc.Id, d.Id),
Locked: true,
Data: *d.Definition,
TimeAuditable: types.TimeAuditable{
CreatedAt: *serviceDashboardsCreatedAt,
UpdatedAt: *serviceDashboardsCreatedAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: author,
UpdatedBy: author,
},
OrgID: orgID,
})
}
servicesWithAvailableMetrics[svc.Id] = nil
}
}
return svcDashboards, nil
}
func (c *Controller) GetDashboardById(ctx context.Context, orgId valuer.UUID, dashboardUuid string) (*dashboardtypes.Dashboard, *model.ApiError) {
cloudProvider, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
if apiErr != nil {
return nil, apiErr
}
allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list available dashboards")
}
for _, d := range allDashboards {
if d.ID == dashboardUuid {
return d, nil
}
}
return nil, model.NotFoundError(fmt.Errorf("couldn't find dashboard with uuid: %s", dashboardUuid))
}
func (c *Controller) dashboardUuid(
cloudProvider string, svcId string, dashboardId string,
) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
func (c *Controller) parseDashboardUuid(dashboardUuid string) (cloudProvider string, svcId string, dashboardId string, apiErr *model.ApiError) {
parts := strings.SplitN(dashboardUuid, "--", 4)
if len(parts) != 4 || parts[0] != "cloud-integration" {
return "", "", "", model.BadRequest(fmt.Errorf("invalid cloud integration dashboard id"))
}
return parts[1], parts[2], parts[3], nil
}
func (c *Controller) IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
_, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
return apiErr == nil
}
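// Editor's note: the dashboard "uuid" above is a structured id rather than a real
// UUID. A minimal round-trip sketch of the same scheme, using only the stdlib:
//
// id := fmt.Sprintf("cloud-integration--%s--%s--%s", "aws", "rds", "overview")
// parts := strings.SplitN(id, "--", 4)
// // parts == []string{"cloud-integration", "aws", "rds", "overview"}
// // len(parts) == 4 && parts[0] == "cloud-integration", so parseDashboardUuid accepts it.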

View File

@@ -0,0 +1,797 @@
package implawsprovider
import (
"context"
"fmt"
"log/slog"
"net/url"
"slices"
"sort"
"sync"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/maps"
)
var (
CodeInvalidAWSRegion = errors.MustNewCode("invalid_aws_region")
CodeDashboardNotFound = errors.MustNewCode("dashboard_not_found")
)
type awsProvider struct {
logger *slog.Logger
querier querier.Querier
accountsRepo integrationstore.CloudProviderAccountsRepository
serviceConfigRepo integrationstore.ServiceConfigDatabase
serviceDefinitions *services.ServicesProvider[*integrationstypes.AWSDefinition]
}
func NewAWSCloudProvider(
logger *slog.Logger,
accountsRepo integrationstore.CloudProviderAccountsRepository,
serviceConfigRepo integrationstore.ServiceConfigDatabase,
querier querier.Querier,
) integrationstypes.CloudProvider {
serviceDefinitions, err := services.NewAWSCloudProviderServices()
if err != nil {
panic("failed to initialize AWS service definitions: " + err.Error())
}
return &awsProvider{
logger: logger,
querier: querier,
accountsRepo: accountsRepo,
serviceConfigRepo: serviceConfigRepo,
serviceDefinitions: serviceDefinitions,
}
}
func (a *awsProvider) GetAccountStatus(ctx context.Context, orgID, accountID string) (*integrationstypes.GettableAccountStatus, error) {
accountRecord, err := a.accountsRepo.Get(ctx, orgID, a.GetName().String(), accountID)
if err != nil {
return nil, err
}
return &integrationstypes.GettableAccountStatus{
Id: accountRecord.ID.String(),
CloudAccountId: accountRecord.AccountID,
Status: accountRecord.Status(),
}, nil
}
func (a *awsProvider) ListConnectedAccounts(ctx context.Context, orgID string) (*integrationstypes.GettableConnectedAccountsList, error) {
accountRecords, err := a.accountsRepo.ListConnected(ctx, orgID, a.GetName().String())
if err != nil {
return nil, err
}
connectedAccounts := make([]*integrationstypes.Account, 0, len(accountRecords))
for _, r := range accountRecords {
connectedAccounts = append(connectedAccounts, r.Account(a.GetName()))
}
return &integrationstypes.GettableConnectedAccountsList{
Accounts: connectedAccounts,
}, nil
}
func (a *awsProvider) AgentCheckIn(ctx context.Context, req *integrationstypes.PostableAgentCheckInPayload) (any, error) {
// agent can't check in unless the account is already created
existingAccount, err := a.accountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.ID)
if err != nil {
return nil, err
}
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in with new %s account id %s for account %s with existing %s id %s",
a.GetName().String(), req.AccountID, existingAccount.ID.StringValue(), a.GetName().String(),
*existingAccount.AccountID,
))
}
existingAccount, err = a.accountsRepo.GetConnectedCloudAccount(ctx, req.OrgID, a.GetName().String(), req.AccountID)
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in to %s account %s with id %s. already connected with id %s",
a.GetName().String(), req.AccountID, req.ID, existingAccount.ID.StringValue(),
))
}
agentReport := integrationstypes.AgentReport{
TimestampMillis: time.Now().UnixMilli(),
Data: req.Data,
}
account, err := a.accountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), &req.ID, nil, &req.AccountID, &agentReport, nil,
)
if err != nil {
return nil, err
}
agentConfig, err := a.getAWSAgentConfig(ctx, account)
if err != nil {
return nil, err
}
return &integrationstypes.GettableAWSAgentCheckIn{
AccountId: account.ID.StringValue(),
CloudAccountId: *account.AccountID,
RemovedAt: account.RemovedAt,
IntegrationConfig: *agentConfig,
}, nil
}
func (a *awsProvider) getAWSAgentConfig(ctx context.Context, account *integrationstypes.CloudIntegration) (*integrationstypes.AWSAgentIntegrationConfig, error) {
// prepare and return integration config to be consumed by agent
agentConfig := &integrationstypes.AWSAgentIntegrationConfig{
EnabledRegions: []string{},
TelemetryCollectionStrategy: &integrationstypes.AWSCollectionStrategy{
Metrics: &integrationstypes.AWSMetricsStrategy{},
Logs: &integrationstypes.AWSLogsStrategy{},
},
}
accountConfig := new(integrationstypes.AWSAccountConfig)
err := integrationstypes.UnmarshalJSON([]byte(account.Config), accountConfig)
if err != nil {
return nil, err
}
if accountConfig.EnabledRegions != nil {
agentConfig.EnabledRegions = accountConfig.EnabledRegions
}
svcConfigs, err := a.serviceConfigRepo.GetAllForAccount(
ctx, account.OrgID, account.ID.StringValue(),
)
if err != nil {
return nil, err
}
// accumulate config in a fixed order to ensure same config generated across runs
configuredServices := maps.Keys(svcConfigs)
slices.Sort(configuredServices)
for _, svcType := range configuredServices {
definition, err := a.serviceDefinitions.GetServiceDefinition(ctx, svcType)
if err != nil {
continue
}
config := svcConfigs[svcType]
serviceConfig := new(integrationstypes.AWSCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
continue
}
if serviceConfig.Logs != nil && serviceConfig.Logs.Enabled {
if svcType == integrationstypes.S3Sync {
// S3 bucket sync: no CloudWatch log subscriptions are appended for this service type,
// though the definition is populated with a custom CloudWatch group that helps in
// calculating the logs connection status.
agentConfig.TelemetryCollectionStrategy.S3Buckets = serviceConfig.Logs.S3Buckets
} else if definition.Strategy != nil && definition.Strategy.Logs != nil { // services that include a logs subscription
agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions = append(
agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions,
definition.Strategy.Logs.Subscriptions...,
)
}
}
if serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled && definition.Strategy != nil && definition.Strategy.Metrics != nil {
agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters = append(
agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters,
definition.Strategy.Metrics.StreamFilters...,
)
}
}
return agentConfig, nil
}
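// Editor's sketch: Go randomizes map iteration order, so sorting the keys above is
// what makes repeated check-ins emit identical configs. A stand-alone illustration
// (stdlib "slices"; x/exp's maps.Keys is equivalent to the manual key loop):
//
// svcConfigs := map[string][]byte{"rds": nil, "ec2": nil, "lambda": nil}
// keys := make([]string, 0, len(svcConfigs))
// for k := range svcConfigs {
// keys = append(keys, k)
// }
// slices.Sort(keys) // always ["ec2", "lambda", "rds"], regardless of insertion order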
func (a *awsProvider) GetName() integrationstypes.CloudProviderType {
return integrationstypes.CloudProviderAWS
}
func (a *awsProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
svcConfigs := make(map[string]*integrationstypes.AWSCloudServiceConfig)
if cloudAccountID != nil {
activeAccount, err := a.accountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
if err != nil {
return nil, err
}
serviceConfigs, err := a.serviceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.String())
if err != nil {
return nil, err
}
for svcType, config := range serviceConfigs {
serviceConfig := new(integrationstypes.AWSCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
svcConfigs[svcType] = serviceConfig
}
}
summaries := make([]integrationstypes.AWSServiceSummary, 0)
definitions, err := a.serviceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf("couldn't list aws service definitions: %w", err))
}
for _, def := range definitions {
summary := integrationstypes.AWSServiceSummary{
DefinitionMetadata: def.DefinitionMetadata,
Config: nil,
}
summary.Config = svcConfigs[summary.Id]
summaries = append(summaries, summary)
}
sort.Slice(summaries, func(i, j int) bool {
return summaries[i].DefinitionMetadata.Title < summaries[j].DefinitionMetadata.Title
})
return &integrationstypes.GettableAWSServices{
Services: summaries,
}, nil
}
func (a *awsProvider) GetServiceDetails(ctx context.Context, req *integrationstypes.GetServiceDetailsReq) (any, error) {
details := new(integrationstypes.GettableAWSServiceDetails)
awsDefinition, err := a.serviceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, model.InternalError(fmt.Errorf("couldn't get aws service definition: %w", err))
}
details.AWSDefinition = *awsDefinition
if req.CloudAccountID == nil {
return details, nil
}
config, err := a.getServiceConfig(ctx, &details.AWSDefinition, req.OrgID, a.GetName().String(), req.ServiceId, *req.CloudAccountID)
if err != nil {
return nil, err
}
if config == nil {
return details, nil
}
details.Config = config
connectionStatus, err := a.getServiceConnectionStatus(
ctx,
*req.CloudAccountID,
req.OrgID,
&details.AWSDefinition,
config,
)
if err != nil {
return nil, err
}
details.ConnectionStatus = connectionStatus
return details, nil
}
func (a *awsProvider) getServiceConnectionStatus(
ctx context.Context,
cloudAccountID string,
orgID valuer.UUID,
def *integrationstypes.AWSDefinition,
serviceConfig *integrationstypes.AWSCloudServiceConfig,
) (*integrationstypes.ServiceConnectionStatus, error) {
if def.Strategy == nil {
return nil, nil
}
resp := new(integrationstypes.ServiceConnectionStatus)
wg := sync.WaitGroup{}
if def.Strategy.Metrics != nil && serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled {
// add to the WaitGroup only for goroutines that are actually launched;
// an unconditional wg.Add(2) would deadlock Wait() whenever one signal is disabled
wg.Add(1)
go func() {
defer func() {
if r := recover(); r != nil {
a.logger.ErrorContext(
ctx, "panic while getting service metrics connection status",
"error", r,
"service", def.DefinitionMetadata.Id,
)
}
}()
defer wg.Done()
status, _ := a.getServiceMetricsConnectionStatus(ctx, cloudAccountID, orgID, def)
resp.Metrics = status
}()
}
if def.Strategy.Logs != nil && serviceConfig.Logs != nil && serviceConfig.Logs.Enabled {
wg.Add(1)
go func() {
defer func() {
if r := recover(); r != nil {
a.logger.ErrorContext(
ctx, "panic while getting service logs connection status",
"error", r,
"service", def.DefinitionMetadata.Id,
)
}
}()
defer wg.Done()
status, _ := a.getServiceLogsConnectionStatus(ctx, cloudAccountID, orgID, def)
resp.Logs = status
}()
}
wg.Wait()
return resp, nil
}
func (a *awsProvider) getServiceMetricsConnectionStatus(
ctx context.Context,
cloudAccountID string,
orgID valuer.UUID,
def *integrationstypes.AWSDefinition,
) ([]*integrationstypes.SignalConnectionStatus, error) {
if def.Strategy == nil ||
def.Strategy.Metrics == nil ||
len(def.Strategy.Metrics.StreamFilters) < 1 ||
len(def.DataCollected.Metrics) < 1 {
return nil, nil
}
statusResp := make([]*integrationstypes.SignalConnectionStatus, 0)
for _, category := range def.IngestionStatusCheck.Metrics {
queries := make([]qbtypes.QueryEnvelope, 0)
for _, check := range category.Checks {
filterExpression := fmt.Sprintf(`cloud.provider="aws" AND cloud.account.id="%s"`, cloudAccountID)
f := ""
for _, attribute := range check.Attributes {
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
if attribute.Value != "" {
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
}
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
}
queries = append(queries, qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: valuer.GenerateUUID().String(),
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{{
MetricName: check.Key,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
}},
Filter: &qbtypes.Filter{
Expression: filterExpression,
},
},
})
}
resp, err := a.querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
SchemaVersion: "v5",
Start: uint64(time.Now().Add(-time.Hour).UnixMilli()),
End: uint64(time.Now().UnixMilli()),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
})
if err != nil {
a.logger.DebugContext(ctx,
"error querying for service metrics connection status",
"error", err,
"service", def.DefinitionMetadata.Id,
)
continue
}
if resp == nil || len(resp.Data.Results) < 1 { // guard the nil response before indexing into results
continue
}
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
if !ok {
continue
}
if queryResponse == nil ||
len(queryResponse.Aggregations) < 1 ||
len(queryResponse.Aggregations[0].Series) < 1 ||
len(queryResponse.Aggregations[0].Series[0].Values) < 1 {
continue
}
statusResp = append(statusResp, &integrationstypes.SignalConnectionStatus{
CategoryID: category.Category,
CategoryDisplayName: category.DisplayName,
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
LastReceivedFrom: "signoz-aws-integration",
})
}
return statusResp, nil
}
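// Editor's sketch: for a hypothetical check attribute {Name: "service.name",
// Operator: "=", Value: "apigateway"} and cloud account "123456789012", the loop
// above yields the filter expression:
//
// cloud.provider="aws" AND cloud.account.id="123456789012" AND service.name = 'apigateway'
//
// An attribute with an empty Value contributes only "<name> <operator>", since the
// value clause is appended conditionally.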
func (a *awsProvider) getServiceLogsConnectionStatus(
ctx context.Context,
cloudAccountID string,
orgID valuer.UUID,
def *integrationstypes.AWSDefinition,
) ([]*integrationstypes.SignalConnectionStatus, error) {
if def.Strategy == nil ||
def.Strategy.Logs == nil ||
len(def.Strategy.Logs.Subscriptions) < 1 ||
len(def.DataCollected.Logs) < 1 {
return nil, nil
}
statusResp := make([]*integrationstypes.SignalConnectionStatus, 0)
for _, category := range def.IngestionStatusCheck.Logs {
queries := make([]qbtypes.QueryEnvelope, 0)
for _, check := range category.Checks {
filterExpression := fmt.Sprintf(`cloud.account.id="%s"`, cloudAccountID)
f := ""
for _, attribute := range check.Attributes {
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
if attribute.Value != "" {
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
}
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
}
queries = append(queries, qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Name: valuer.GenerateUUID().String(),
Signal: telemetrytypes.SignalLogs,
Aggregations: []qbtypes.LogAggregation{{
Expression: "count()",
}},
Filter: &qbtypes.Filter{
Expression: filterExpression,
},
Limit: 10,
Offset: 0,
},
})
}
resp, err := a.querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
SchemaVersion: "v1",
Start: uint64(time.Now().Add(-time.Hour * 1).UnixMilli()),
End: uint64(time.Now().UnixMilli()),
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
})
if err != nil {
a.logger.DebugContext(ctx,
"error querying for service logs connection status",
"error", err,
"service", def.DefinitionMetadata.Id,
)
continue
}
if resp == nil || len(resp.Data.Results) < 1 { // guard the nil response before indexing into results
continue
}
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
if !ok {
continue
}
if queryResponse == nil ||
len(queryResponse.Aggregations) < 1 ||
len(queryResponse.Aggregations[0].Series) < 1 ||
len(queryResponse.Aggregations[0].Series[0].Values) < 1 {
continue
}
statusResp = append(statusResp, &integrationstypes.SignalConnectionStatus{
CategoryID: category.Category,
CategoryDisplayName: category.DisplayName,
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
LastReceivedFrom: "signoz-aws-integration",
})
}
return statusResp, nil
}
func (a *awsProvider) getServiceConfig(
ctx context.Context,
def *integrationstypes.AWSDefinition,
orgID valuer.UUID, cloudProvider, serviceId, cloudAccountId string,
) (*integrationstypes.AWSCloudServiceConfig, error) {
activeAccount, err := a.accountsRepo.GetConnectedCloudAccount(ctx, orgID.String(), cloudProvider, cloudAccountId)
if err != nil {
return nil, err
}
config, err := a.serviceConfigRepo.Get(ctx, orgID.String(), activeAccount.ID.StringValue(), serviceId)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return nil, nil
}
return nil, err
}
serviceConfig := new(integrationstypes.AWSCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
if config != nil && serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled {
def.PopulateDashboardURLs(a.GetName(), serviceId)
}
return serviceConfig, nil
}
func (a *awsProvider) GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
accountRecords, err := a.accountsRepo.ListConnected(ctx, orgID.StringValue(), a.GetName().String())
if err != nil {
return nil, err
}
// for now service dashboards are only available when metrics are enabled.
servicesWithAvailableMetrics := map[string]*time.Time{}
for _, ar := range accountRecords {
if ar.AccountID != nil {
configsBySvcId, err := a.serviceConfigRepo.GetAllForAccount(ctx, orgID.StringValue(), ar.ID.StringValue())
if err != nil {
return nil, err
}
for svcId, config := range configsBySvcId {
serviceConfig := new(integrationstypes.AWSCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
if serviceConfig.Metrics != nil && serviceConfig.Metrics.Enabled {
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
}
}
}
}
svcDashboards := make([]*dashboardtypes.Dashboard, 0)
allServices, err := a.serviceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to list aws service definitions")
}
for _, svc := range allServices {
serviceDashboardsCreatedAt := servicesWithAvailableMetrics[svc.Id]
if serviceDashboardsCreatedAt != nil {
// append rather than assign, so dashboards collected for earlier services are not overwritten
svcDashboards = append(svcDashboards, integrationstypes.GetDashboardsFromAssets(svc.Id, a.GetName(), serviceDashboardsCreatedAt, svc.Assets)...)
servicesWithAvailableMetrics[svc.Id] = nil
}
}
return svcDashboards, nil
}
func (a *awsProvider) GetDashboard(ctx context.Context, req *integrationstypes.GettableDashboard) (*dashboardtypes.Dashboard, error) {
allDashboards, err := a.GetAvailableDashboards(ctx, req.OrgID)
if err != nil {
return nil, err
}
for _, d := range allDashboards {
if d.ID == req.ID {
return d, nil
}
}
return nil, errors.NewNotFoundf(CodeDashboardNotFound, "dashboard with id %s not found", req.ID)
}
func (a *awsProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationstypes.PostableConnectionArtifact) (any, error) {
connection := new(integrationstypes.PostableAWSConnectionUrl)
err := integrationstypes.UnmarshalJSON(req.Data, connection)
if err != nil {
return nil, err
}
if connection.AccountConfig != nil {
for _, region := range connection.AccountConfig.EnabledRegions {
if integrationstypes.ValidAWSRegions[region] {
continue
}
return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
}
}
config, err := integrationstypes.MarshalJSON(connection.AccountConfig)
if err != nil {
return nil, err
}
account, err := a.accountsRepo.Upsert(
ctx, req.OrgID, integrationstypes.CloudProviderAWS.String(), nil, config,
nil, nil, nil,
)
if err != nil {
return nil, err
}
agentVersion := "v0.0.8"
if connection.AgentConfig.Version != "" {
agentVersion = connection.AgentConfig.Version
}
baseURL := fmt.Sprintf("https://%s.console.aws.amazon.com/cloudformation/home",
connection.AgentConfig.Region)
u, _ := url.Parse(baseURL)
q := u.Query()
q.Set("region", connection.AgentConfig.Region)
u.Fragment = "/stacks/quickcreate"
u.RawQuery = q.Encode()
q = u.Query()
q.Set("stackName", "signoz-integration")
q.Set("templateURL", fmt.Sprintf("https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json", agentVersion))
q.Set("param_SigNozIntegrationAgentVersion", agentVersion)
q.Set("param_SigNozApiUrl", connection.AgentConfig.SigNozAPIUrl)
q.Set("param_SigNozApiKey", connection.AgentConfig.SigNozAPIKey)
q.Set("param_SigNozAccountId", account.ID.StringValue())
q.Set("param_IngestionUrl", connection.AgentConfig.IngestionUrl)
q.Set("param_IngestionKey", connection.AgentConfig.IngestionKey)
return &integrationstypes.GettableAWSConnectionUrl{
AccountId: account.ID.StringValue(),
ConnectionUrl: u.String() + "?&" + q.Encode(), // this format is required by AWS
}, nil
}
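// Editor's note: per the comment above, the "?&" concatenation is what the AWS
// quick-create console expects — the stack parameters come after the fragment.
// With hypothetical inputs (region "us-east-1", agent version "v0.0.8") the URL
// has the shape:
//
// https://us-east-1.console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/quickcreate?&param_IngestionKey=...&stackName=signoz-integration&templateURL=...
//
// q.Encode() sorts parameters by key, so the output is stable across runs.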
func (a *awsProvider) UpdateServiceConfig(ctx context.Context, req *integrationstypes.PatchableServiceConfig) (any, error) {
definition, err := a.serviceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, model.InternalError(fmt.Errorf("couldn't get aws service definition: %w", err))
}
serviceConfig := new(integrationstypes.PatchableAWSCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(req.Config, serviceConfig)
if err != nil {
return nil, err
}
if err = serviceConfig.Config.Validate(definition); err != nil {
return nil, err
}
// can only update config for a connected cloud account id
_, err = a.accountsRepo.GetConnectedCloudAccount(
ctx, req.OrgID, a.GetName().String(), serviceConfig.CloudAccountId,
)
if err != nil {
return nil, err
}
serviceConfigBytes, err := integrationstypes.MarshalJSON(serviceConfig.Config)
if err != nil {
return nil, err
}
updatedConfig, err := a.serviceConfigRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), serviceConfig.CloudAccountId, req.ServiceId, serviceConfigBytes,
)
if err != nil {
return nil, err
}
if err = integrationstypes.UnmarshalJSON(updatedConfig, serviceConfig); err != nil {
return nil, err
}
return &integrationstypes.PatchServiceConfigResponse{
ServiceId: req.ServiceId,
Config: serviceConfig,
}, nil
}
func (a *awsProvider) UpdateAccountConfig(ctx context.Context, req *integrationstypes.PatchableAccountConfig) (any, error) {
config := new(integrationstypes.PatchableAWSAccountConfig)
err := integrationstypes.UnmarshalJSON(req.Data, config)
if err != nil {
return nil, err
}
if config.Config == nil {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "account config can't be null")
}
for _, region := range config.Config.EnabledRegions {
if integrationstypes.ValidAWSRegions[region] {
continue
}
return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
}
configBytes, err := integrationstypes.MarshalJSON(config.Config)
if err != nil {
return nil, err
}
// account must exist to update config, but it doesn't need to be connected
_, err = a.accountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.AccountId)
if err != nil {
return nil, err
}
accountRecord, err := a.accountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), &req.AccountId, configBytes, nil, nil, nil,
)
if err != nil {
return nil, err
}
return accountRecord.Account(a.GetName()), nil
}
func (a *awsProvider) DisconnectAccount(ctx context.Context, orgID, accountID string) (*integrationstypes.CloudIntegration, error) {
account, err := a.accountsRepo.Get(ctx, orgID, a.GetName().String(), accountID)
if err != nil {
return nil, err
}
tsNow := time.Now()
account, err = a.accountsRepo.Upsert(
ctx, orgID, a.GetName().String(), &accountID, nil, nil, nil, &tsNow,
)
if err != nil {
return nil, err
}
return account, nil
}

View File

@@ -0,0 +1,592 @@
package implazureprovider
import (
"context"
"fmt"
"log/slog"
"slices"
"sort"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/maps"
)
var (
CodeInvalidAzureRegion = errors.MustNewCode("invalid_azure_region")
CodeDashboardNotFound = errors.MustNewCode("dashboard_not_found")
)
type azureProvider struct {
logger *slog.Logger
accountsRepo store.CloudProviderAccountsRepository
serviceConfigRepo store.ServiceConfigDatabase
azureServiceDefinitions *services.ServicesProvider[*integrationstypes.AzureDefinition]
querier querier.Querier
}
func NewAzureCloudProvider(
logger *slog.Logger,
accountsRepo store.CloudProviderAccountsRepository,
serviceConfigRepo store.ServiceConfigDatabase,
querier querier.Querier,
) integrationstypes.CloudProvider {
azureServiceDefinitions, err := services.NewAzureCloudProviderServices()
if err != nil {
panic("failed to initialize Azure service definitions: " + err.Error())
}
return &azureProvider{
logger: logger,
accountsRepo: accountsRepo,
serviceConfigRepo: serviceConfigRepo,
azureServiceDefinitions: azureServiceDefinitions,
querier: querier,
}
}
func (a *azureProvider) GetAccountStatus(ctx context.Context, orgID, accountID string) (*integrationstypes.GettableAccountStatus, error) {
account, err := a.accountsRepo.Get(ctx, orgID, a.GetName().String(), accountID)
if err != nil {
return nil, err
}
return &integrationstypes.GettableAccountStatus{
Id: account.ID.String(),
CloudAccountId: account.AccountID,
Status: account.Status(),
}, nil
}
func (a *azureProvider) ListConnectedAccounts(ctx context.Context, orgID string) (*integrationstypes.GettableConnectedAccountsList, error) {
accountRecords, err := a.accountsRepo.ListConnected(ctx, orgID, a.GetName().String())
if err != nil {
return nil, err
}
connectedAccounts := make([]*integrationstypes.Account, 0, len(accountRecords))
for _, r := range accountRecords {
connectedAccounts = append(connectedAccounts, r.Account(a.GetName()))
}
return &integrationstypes.GettableConnectedAccountsList{
Accounts: connectedAccounts,
}, nil
}
func (a *azureProvider) AgentCheckIn(ctx context.Context, req *integrationstypes.PostableAgentCheckInPayload) (any, error) {
existingAccount, err := a.accountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.ID)
if err != nil {
return nil, err
}
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in with new %s account id %s for account %s with existing %s id %s",
a.GetName().String(), req.AccountID, existingAccount.ID.StringValue(), a.GetName().String(),
*existingAccount.AccountID,
))
}
existingAccount, err = a.accountsRepo.GetConnectedCloudAccount(ctx, req.OrgID, a.GetName().String(), req.AccountID)
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in to %s account %s with id %s. already connected with id %s",
a.GetName().String(), req.AccountID, req.ID, existingAccount.ID.StringValue(),
))
}
agentReport := integrationstypes.AgentReport{
TimestampMillis: time.Now().UnixMilli(),
Data: req.Data,
}
account, err := a.accountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), &req.ID, nil, &req.AccountID, &agentReport, nil,
)
if err != nil {
return nil, err
}
agentConfig, err := a.getAzureAgentConfig(ctx, account)
if err != nil {
return nil, err
}
return &integrationstypes.GettableAzureAgentCheckIn{
AccountId: account.ID.StringValue(),
CloudAccountId: *account.AccountID,
RemovedAt: account.RemovedAt,
IntegrationConfig: *agentConfig,
}, nil
}
func (a *azureProvider) getAzureAgentConfig(ctx context.Context, account *integrationstypes.CloudIntegration) (*integrationstypes.AzureAgentIntegrationConfig, error) {
// prepare and return integration config to be consumed by agent
agentConfig := &integrationstypes.AzureAgentIntegrationConfig{
TelemetryCollectionStrategy: make(map[string]*integrationstypes.AzureCollectionStrategy),
}
accountConfig := new(integrationstypes.AzureAccountConfig)
err := integrationstypes.UnmarshalJSON([]byte(account.Config), accountConfig)
if err != nil {
return nil, err
}
if account.Config != "" {
agentConfig.DeploymentRegion = accountConfig.DeploymentRegion
agentConfig.EnabledResourceGroups = accountConfig.EnabledResourceGroups
}
svcConfigs, err := a.serviceConfigRepo.GetAllForAccount(
ctx, account.OrgID, account.ID.StringValue(),
)
if err != nil {
return nil, err
}
// accumulate config in a fixed order to ensure same config generated across runs
configuredServices := maps.Keys(svcConfigs)
slices.Sort(configuredServices)
for _, svcType := range configuredServices {
// reset the accumulators per service: each service's strategy must carry only its
// own categories, otherwise later entries would inherit earlier services' lists
metrics := make([]*integrationstypes.AzureMetricsStrategy, 0)
logs := make([]*integrationstypes.AzureLogsStrategy, 0)
definition, err := a.azureServiceDefinitions.GetServiceDefinition(ctx, svcType)
if err != nil {
continue
}
config := svcConfigs[svcType]
serviceConfig := new(integrationstypes.AzureCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
continue
}
metricsStrategyMap := make(map[string]*integrationstypes.AzureMetricsStrategy)
logsStrategyMap := make(map[string]*integrationstypes.AzureLogsStrategy)
if definition.Strategy != nil && definition.Strategy.Metrics != nil {
for _, metric := range definition.Strategy.Metrics {
metricsStrategyMap[metric.Name] = metric
}
}
if definition.Strategy != nil && definition.Strategy.Logs != nil {
for _, log := range definition.Strategy.Logs {
logsStrategyMap[log.Name] = log
}
}
if serviceConfig.Metrics != nil {
for _, metric := range serviceConfig.Metrics {
strategy, found := metricsStrategyMap[metric.Name]
if metric.Enabled && found { // skip names missing from the definition to avoid a nil dereference
metrics = append(metrics, &integrationstypes.AzureMetricsStrategy{
CategoryType: strategy.CategoryType,
Name: metric.Name,
})
}
}
}
if serviceConfig.Logs != nil {
for _, log := range serviceConfig.Logs {
strategy, found := logsStrategyMap[log.Name]
if log.Enabled && found { // skip names missing from the definition to avoid a nil dereference
logs = append(logs, &integrationstypes.AzureLogsStrategy{
CategoryType: strategy.CategoryType,
Name: log.Name,
})
}
}
}
strategy := &integrationstypes.AzureCollectionStrategy{
Metrics: metrics,
Logs: logs,
}
agentConfig.TelemetryCollectionStrategy[svcType] = strategy
}
return agentConfig, nil
}
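// Editor's sketch of the resulting strategy shape, keyed by service type (unlike
// the flat AWS strategy); service and category names are hypothetical and the
// JSON keys depend on the tags declared on AzureAgentIntegrationConfig:
//
// "telemetry": {
// "azure-vm": {
// "metrics": [{"name": "Percentage CPU", "category_type": "platform"}],
// "logs": []
// }
// }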
func (a *azureProvider) GetName() integrationstypes.CloudProviderType {
return integrationstypes.CloudProviderAzure
}
func (a *azureProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
svcConfigs := make(map[string]*integrationstypes.AzureCloudServiceConfig)
if cloudAccountID != nil {
activeAccount, err := a.accountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
if err != nil {
return nil, err
}
serviceConfigs, err := a.serviceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.StringValue())
if err != nil {
return nil, err
}
for svcType, config := range serviceConfigs {
serviceConfig := new(integrationstypes.AzureCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
svcConfigs[svcType] = serviceConfig
}
}
summaries := make([]integrationstypes.AzureServiceSummary, 0)
definitions, err := a.azureServiceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf("couldn't list aws service definitions: %w", err))
}
for _, def := range definitions {
summary := integrationstypes.AzureServiceSummary{
DefinitionMetadata: def.DefinitionMetadata,
Config: nil,
}
summary.Config = svcConfigs[summary.Id]
summaries = append(summaries, summary)
}
sort.Slice(summaries, func(i, j int) bool {
return summaries[i].DefinitionMetadata.Title < summaries[j].DefinitionMetadata.Title
})
return &integrationstypes.GettableAzureServices{
Services: summaries,
}, nil
}
func (a *azureProvider) GetServiceDetails(ctx context.Context, req *integrationstypes.GetServiceDetailsReq) (any, error) {
details := new(integrationstypes.GettableAzureServiceDetails)
azureDefinition, err := a.azureServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, model.InternalError(fmt.Errorf("couldn't get aws service definition: %w", err))
}
details.AzureDefinition = *azureDefinition
if req.CloudAccountID == nil {
return details, nil
}
config, err := a.getServiceConfig(ctx, azureDefinition, req.OrgID.String(), req.ServiceId, *req.CloudAccountID)
if err != nil {
return nil, err
}
details.Config = config
// fill default values for config
if details.Config == nil {
cfg := new(integrationstypes.AzureCloudServiceConfig)
logs := make([]*integrationstypes.AzureCloudServiceLogsConfig, 0)
if azureDefinition.Strategy != nil && azureDefinition.Strategy.Logs != nil {
for _, log := range azureDefinition.Strategy.Logs {
logs = append(logs, &integrationstypes.AzureCloudServiceLogsConfig{
Enabled: false,
Name: log.Name,
})
}
}
metrics := make([]*integrationstypes.AzureCloudServiceMetricsConfig, 0)
if azureDefinition.Strategy != nil && azureDefinition.Strategy.Metrics != nil {
for _, metric := range azureDefinition.Strategy.Metrics {
metrics = append(metrics, &integrationstypes.AzureCloudServiceMetricsConfig{
Enabled: false,
Name: metric.Name,
})
}
}
cfg.Logs = logs
cfg.Metrics = metrics
details.Config = cfg
}
// TODO: write logic for getting connection status
return details, nil
}
func (a *azureProvider) getServiceConfig(
ctx context.Context,
definition *integrationstypes.AzureDefinition,
orgID string,
serviceId string,
cloudAccountId string,
) (*integrationstypes.AzureCloudServiceConfig, error) {
activeAccount, err := a.accountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), cloudAccountId)
if err != nil {
return nil, err
}
configBytes, err := a.serviceConfigRepo.Get(ctx, orgID, activeAccount.ID.String(), serviceId)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return nil, nil
}
return nil, err
}
config := new(integrationstypes.AzureCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(configBytes, config)
if err != nil {
return nil, err
}
for _, metric := range config.Metrics {
if metric.Enabled {
definition.PopulateDashboardURLs(a.GetName(), serviceId)
break
}
}
return config, nil
}
func (a *azureProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationstypes.PostableConnectionArtifact) (any, error) {
connection := new(integrationstypes.PostableAzureConnectionCommand)
err := integrationstypes.UnmarshalJSON(req.Data, connection)
if err != nil {
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed to unmarshal request data into Azure connection config")
}
// validate connection config
if connection.AccountConfig != nil {
if !integrationstypes.ValidAzureRegions[connection.AccountConfig.DeploymentRegion] {
return nil, errors.NewInvalidInputf(CodeInvalidAzureRegion, "invalid azure region: %s",
connection.AccountConfig.DeploymentRegion,
)
}
}
config, err := integrationstypes.MarshalJSON(connection.AccountConfig)
if err != nil {
return nil, err
}
account, err := a.accountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), nil, config,
nil, nil, nil,
)
if err != nil {
return nil, err
}
agentVersion := "v0.0.1"
if connection.AgentConfig.Version != "" {
agentVersion = connection.AgentConfig.Version
}
// TODO: improve the command and set url
cliCommand := []string{"az", "stack", "sub", "create", "--name", "SigNozIntegration", "--location",
connection.AccountConfig.DeploymentRegion, "--template-uri", fmt.Sprintf("<url>%s", agentVersion),
"--action-on-unmanage", "deleteAll", "--deny-settings-mode", "denyDelete", "--parameters", fmt.Sprintf("rgName=%s", "signoz-integration-rg"),
fmt.Sprintf("rgLocation=%s", connection.AccountConfig.DeploymentRegion)}
return &integrationstypes.GettableAzureConnectionCommand{
AccountId: account.ID.String(),
AzureShellConnectionCommand: "az create",
AzureCliConnectionCommand: strings.Join(cliCommand, " "),
}, nil
}
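// Editor's sketch: with a hypothetical deployment region "eastus" and agent
// version "v0.0.1", strings.Join(cliCommand, " ") yields roughly:
//
// az stack sub create --name SigNozIntegration --location eastus --template-uri <url>v0.0.1 --action-on-unmanage deleteAll --deny-settings-mode denyDelete --parameters rgName=signoz-integration-rg rgLocation=eastus
//
// The "<url>" placeholder is intentionally left unresolved per the TODO above.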
func (a *azureProvider) UpdateServiceConfig(ctx context.Context, req *integrationstypes.PatchableServiceConfig) (any, error) {
definition, err := a.azureServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, err
}
serviceConfig := new(integrationstypes.PatchableAzureCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(req.Config, serviceConfig)
if err != nil {
return nil, err
}
if err = serviceConfig.Config.Validate(definition); err != nil {
return nil, err
}
// can only update config for a connected cloud account id
_, err = a.accountsRepo.GetConnectedCloudAccount(
ctx, req.OrgID, a.GetName().String(), serviceConfig.CloudAccountId,
)
if err != nil {
return nil, err
}
serviceConfigBytes, err := integrationstypes.MarshalJSON(serviceConfig.Config)
if err != nil {
return nil, err
}
updatedConfig, err := a.serviceConfigRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), serviceConfig.CloudAccountId, req.ServiceId, serviceConfigBytes,
)
if err != nil {
return nil, err
}
if err = integrationstypes.UnmarshalJSON(updatedConfig, serviceConfig); err != nil {
return nil, err
}
return &integrationstypes.PatchServiceConfigResponse{
ServiceId: req.ServiceId,
Config: serviceConfig,
}, nil
}
func (a *azureProvider) GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
accountRecords, err := a.accountsRepo.ListConnected(ctx, orgID.StringValue(), a.GetName().String())
if err != nil {
return nil, err
}
// for now service dashboards are only available when metrics are enabled.
servicesWithAvailableMetrics := map[string]*time.Time{}
for _, ar := range accountRecords {
if ar.AccountID == nil {
continue
}
configsBySvcId, err := a.serviceConfigRepo.GetAllForAccount(ctx, orgID.StringValue(), ar.ID.StringValue())
if err != nil {
return nil, err
}
for svcId, config := range configsBySvcId {
serviceConfig := new(integrationstypes.AzureCloudServiceConfig)
err = integrationstypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
if serviceConfig.Metrics != nil {
for _, metric := range serviceConfig.Metrics {
if metric.Enabled {
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
break
}
}
}
}
}
svcDashboards := make([]*dashboardtypes.Dashboard, 0)
allServices, err := a.azureServiceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to list azure service definitions")
}
for _, svc := range allServices {
serviceDashboardsCreatedAt := servicesWithAvailableMetrics[svc.Id]
if serviceDashboardsCreatedAt != nil {
svcDashboards = append(svcDashboards, integrationstypes.GetDashboardsFromAssets(svc.Id, a.GetName(), serviceDashboardsCreatedAt, svc.Assets)...) // append so earlier services' dashboards are preserved
servicesWithAvailableMetrics[svc.Id] = nil
}
}
return svcDashboards, nil
}
func (a *azureProvider) GetDashboard(ctx context.Context, req *integrationstypes.GettableDashboard) (*dashboardtypes.Dashboard, error) {
allDashboards, err := a.GetAvailableDashboards(ctx, req.OrgID)
if err != nil {
return nil, err
}
for _, dashboard := range allDashboards {
if dashboard.ID == req.ID {
return dashboard, nil
}
}
return nil, errors.NewNotFoundf(CodeDashboardNotFound, "dashboard with id %s not found", req.ID)
}
func (a *azureProvider) UpdateAccountConfig(ctx context.Context, req *integrationstypes.PatchableAccountConfig) (any, error) {
config := new(integrationstypes.PatchableAzureAccountConfig)
err := integrationstypes.UnmarshalJSON(req.Data, config)
if err != nil {
return nil, err
}
if config.Config == nil || len(config.Config.EnabledResourceGroups) < 1 { // || avoids dereferencing a nil config
return nil, errors.NewInvalidInputf(CodeInvalidAzureRegion, "azure region and resource groups must be provided")
}
// for azure, preserve the deployment region if it is already set
account, err := a.accountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.AccountId)
if err != nil {
return nil, err
}
storedConfig := new(integrationstypes.AzureAccountConfig)
err = integrationstypes.UnmarshalJSON([]byte(account.Config), storedConfig)
if err != nil {
return nil, err
}
if account.Config != "" {
config.Config.DeploymentRegion = storedConfig.DeploymentRegion
}
configBytes, err := integrationstypes.MarshalJSON(config.Config)
if err != nil {
return nil, err
}
accountRecord, err := a.accountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), &req.AccountId, configBytes, nil, nil, nil,
)
if err != nil {
return nil, err
}
return accountRecord.Account(a.GetName()), nil
}
func (a *azureProvider) DisconnectAccount(ctx context.Context, orgID, accountID string) (*integrationstypes.CloudIntegration, error) {
account, err := a.accountsRepo.Get(ctx, orgID, a.GetName().String(), accountID)
if err != nil {
return nil, err
}
tsNow := time.Now()
account, err = a.accountsRepo.Upsert(
ctx, orgID, a.GetName().String(), &accountID, nil, nil, nil, &tsNow,
)
if err != nil {
return nil, err
}
return account, nil
}

View File

@@ -1,94 +1 @@
package cloudintegrations
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/types"
)
type ServiceSummary struct {
services.Metadata
Config *types.CloudServiceConfig `json:"config"`
}
type ServiceDetails struct {
services.Definition
Config *types.CloudServiceConfig `json:"config"`
ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}
type AccountStatus struct {
Integration AccountIntegrationStatus `json:"integration"`
}
type AccountIntegrationStatus struct {
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}
type LogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
type MetricsConfig struct {
Enabled bool `json:"enabled"`
}
type ServiceConnectionStatus struct {
Logs *SignalConnectionStatus `json:"logs"`
Metrics *SignalConnectionStatus `json:"metrics"`
}
type SignalConnectionStatus struct {
LastReceivedTsMillis int64 `json:"last_received_ts_ms"` // epoch milliseconds
LastReceivedFrom string `json:"last_received_from"` // resource identifier
}
type CompiledCollectionStrategy = services.CollectionStrategy
func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy, error) {
if provider == "aws" {
return &CompiledCollectionStrategy{
Provider: "aws",
AWSMetrics: &services.AWSMetricsStrategy{},
AWSLogs: &services.AWSLogsStrategy{},
}, nil
}
return nil, errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", provider)
}
// Helper for accumulating strategies for enabled services.
func AddServiceStrategy(serviceType string, cs *CompiledCollectionStrategy,
definitionStrat *services.CollectionStrategy, config *types.CloudServiceConfig) error {
if definitionStrat.Provider != cs.Provider {
return errors.NewInternalf(CodeMismatchCloudProvider, "can't add %s service strategy to compiled strategy for %s",
definitionStrat.Provider, cs.Provider)
}
if cs.Provider == "aws" {
if config.Logs != nil && config.Logs.Enabled {
if serviceType == services.S3Sync {
// S3 bucket sync: no CloudWatch logs are appended for this service type,
// though the definition is populated with a custom CloudWatch group that helps calculate the logs connection status
cs.S3Buckets = config.Logs.S3Buckets
} else if definitionStrat.AWSLogs != nil { // services that include a logs subscription
cs.AWSLogs.Subscriptions = append(
cs.AWSLogs.Subscriptions,
definitionStrat.AWSLogs.Subscriptions...,
)
}
}
if config.Metrics != nil && config.Metrics.Enabled && definitionStrat.AWSMetrics != nil {
cs.AWSMetrics.StreamFilters = append(
cs.AWSMetrics.StreamFilters,
definitionStrat.AWSMetrics.StreamFilters...,
)
}
return nil
}
return errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cs.Provider)
}
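
To see how these two helpers compose, a rough sketch of compiling one strategy across every enabled service (the enabledServiceDefs and svcConfigs maps are hypothetical):

// Sketch: accumulate the collection strategy for all enabled AWS services.
cs, err := NewCompiledCollectionStrategy("aws")
if err != nil {
	return nil, err
}
for svcType, def := range enabledServiceDefs { // hypothetical map[string]services.Definition
	cfg := svcConfigs[svcType] // hypothetical *types.CloudServiceConfig per service
	if cfg == nil {
		continue // service not configured; nothing to add
	}
	if err := AddServiceStrategy(svcType, cs, def.Strategy, cfg); err != nil {
		return nil, err
	}
}
// cs now holds the union of metric stream filters, log subscriptions
// and S3 buckets across every enabled service.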

View File

@@ -0,0 +1,30 @@
package cloudintegrations
import (
"log/slog"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implawsprovider"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implazureprovider"
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
)
func NewCloudProviderRegistry(
logger *slog.Logger,
store sqlstore.SQLStore,
querier querier.Querier,
) map[integrationstypes.CloudProviderType]integrationstypes.CloudProvider {
registry := make(map[integrationstypes.CloudProviderType]integrationstypes.CloudProvider)
accountsRepo := integrationstore.NewCloudProviderAccountsRepository(store)
serviceConfigRepo := integrationstore.NewServiceConfigRepository(store)
awsProviderImpl := implawsprovider.NewAWSCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
registry[integrationstypes.CloudProviderAWS] = awsProviderImpl
azureProviderImpl := implazureprovider.NewAzureCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
registry[integrationstypes.CloudProviderAzure] = azureProviderImpl
return registry
}
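
A short sketch of how the registry is consumed; the missing-provider guard is an assumption (the handlers further below index the map directly after validating the provider string):

// Sketch: build the registry once at startup, then resolve providers per request.
registry := cloudintegrations.NewCloudProviderRegistry(logger, sqlStore, querier)

provider, ok := registry[integrationstypes.CloudProviderAzure]
if !ok {
	// Shouldn't happen for the providers registered above; guard regardless.
	return fmt.Errorf("no cloud provider registered for azure")
}
accounts, err := provider.ListConnectedAccounts(ctx, orgID)
if err != nil {
	return err
}
_ = accounts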

View File

@@ -148,6 +148,146 @@
"name": "aws_ApiGateway_Latency_sum",
"unit": "Milliseconds",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_sum",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_max",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_min",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_count",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_count",
"unit": "Count",
"type": "Gauge"
}
],
"logs": [

View File

@@ -1928,7 +1928,7 @@
"unit": "Percent",
"type": "Gauge",
"description": ""
}
}
]
},
"telemetry_collection_strategy": {
@@ -1951,4 +1951,4 @@
}
]
}
}
}

View File

@@ -0,0 +1 @@
<svg id="f2f04349-8aee-4413-84c9-a9053611b319" xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="ad4c4f96-09aa-4f91-ba10-5cb8ad530f74" x1="9" y1="15.83" x2="9" y2="5.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b3b3b3" /><stop offset="0.26" stop-color="#c1c1c1" /><stop offset="1" stop-color="#e6e6e6" /></linearGradient></defs><title>Icon-storage-86</title><path d="M.5,5.79h17a0,0,0,0,1,0,0v9.48a.57.57,0,0,1-.57.57H1.07a.57.57,0,0,1-.57-.57V5.79A0,0,0,0,1,.5,5.79Z" fill="url(#ad4c4f96-09aa-4f91-ba10-5cb8ad530f74)" /><path d="M1.07,2.17H16.93a.57.57,0,0,1,.57.57V5.79a0,0,0,0,1,0,0H.5a0,0,0,0,1,0,0V2.73A.57.57,0,0,1,1.07,2.17Z" fill="#37c2b1" /><path d="M2.81,6.89H15.18a.27.27,0,0,1,.26.27v1.4a.27.27,0,0,1-.26.27H2.81a.27.27,0,0,1-.26-.27V7.16A.27.27,0,0,1,2.81,6.89Z" fill="#fff" /><path d="M2.82,9.68H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V10A.27.27,0,0,1,2.82,9.68Z" fill="#37c2b1" /><path d="M2.82,12.5H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V12.77A.27.27,0,0,1,2.82,12.5Z" fill="#258277" /></svg>


View File

@@ -0,0 +1,252 @@
{
"id": "blobstorage",
"title": "Blob Storage",
"icon": "file://icon.svg",
"overview": "file://overview.md",
"supported_signals": {
"metrics": true,
"logs": true
},
"data_collected": {
"metrics": [
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
}
],
"logs": [
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
}
]
},
"telemetry_collection_strategy": {
"azure_metrics": [
{
"category_type": "metrics",
"name": "Capacity"
},
{
"category_type": "metrics",
"name": "Transaction"
}
],
"azure_logs": [
{
"category_type": "logs",
"name": "StorageRead"
},
{
"category_type": "logs",
"name": "StorageWrite"
},
{
"category_type": "logs",
"name": "StorageDelete"
}
]
},
"assets": {
"dashboards": [
{
"id": "overview",
"title": "Blob Storage Overview",
"description": "Overview of Blob Storage",
"definition": "file://assets/dashboards/overview.json"
}
]
}
}

View File

@@ -0,0 +1,2 @@
Monitor Azure Blob Storage with SigNoz
Collect key Blob Storage metrics and view them with an out-of-the-box dashboard.

View File

@@ -0,0 +1 @@
<svg id="f2f04349-8aee-4413-84c9-a9053611b319" xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="ad4c4f96-09aa-4f91-ba10-5cb8ad530f74" x1="9" y1="15.83" x2="9" y2="5.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b3b3b3" /><stop offset="0.26" stop-color="#c1c1c1" /><stop offset="1" stop-color="#e6e6e6" /></linearGradient></defs><title>Icon-storage-86</title><path d="M.5,5.79h17a0,0,0,0,1,0,0v9.48a.57.57,0,0,1-.57.57H1.07a.57.57,0,0,1-.57-.57V5.79A0,0,0,0,1,.5,5.79Z" fill="url(#ad4c4f96-09aa-4f91-ba10-5cb8ad530f74)" /><path d="M1.07,2.17H16.93a.57.57,0,0,1,.57.57V5.79a0,0,0,0,1,0,0H.5a0,0,0,0,1,0,0V2.73A.57.57,0,0,1,1.07,2.17Z" fill="#37c2b1" /><path d="M2.81,6.89H15.18a.27.27,0,0,1,.26.27v1.4a.27.27,0,0,1-.26.27H2.81a.27.27,0,0,1-.26-.27V7.16A.27.27,0,0,1,2.81,6.89Z" fill="#fff" /><path d="M2.82,9.68H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V10A.27.27,0,0,1,2.82,9.68Z" fill="#37c2b1" /><path d="M2.82,12.5H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V12.77A.27.27,0,0,1,2.82,12.5Z" fill="#258277" /></svg>


View File

@@ -0,0 +1,247 @@
{
"id": "frontdoor",
"title": "Front Door",
"icon": "file://icon.svg",
"overview": "file://overview.md",
"supported_signals": {
"metrics": true,
"logs": true
},
"data_collected": {
"metrics": [
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
}
],
"logs": [
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
}
]
},
"telemetry_collection_strategy": {
"azure_metrics": [
{
"category_type": "metrics",
"name": "Capacity"
},
{
"category_type": "metrics",
"name": "Transaction"
}
],
"azure_logs": [
{
"category_type": "logs",
"name": "StorageRead"
},
{
"category_type": "logs",
"name": "StorageWrite"
},
{
"category_type": "logs",
"name": "StorageDelete"
}
]
},
"assets": {
"dashboards": [
{
"id": "overview",
"title": "Front Door Overview",
"description": "Overview of Blob Storage",
"definition": "file://assets/dashboards/overview.json"
}
]
}
}

View File

@@ -0,0 +1,2 @@
Monitor Azure Front Door with SigNoz
Collect key Front Door metrics and view them with an out-of-the-box dashboard.

View File

@@ -1,91 +0,0 @@
package services
import (
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
)
type Metadata struct {
Id string `json:"id"`
Title string `json:"title"`
Icon string `json:"icon"`
}
type Definition struct {
Metadata
Overview string `json:"overview"` // markdown
Assets Assets `json:"assets"`
SupportedSignals SupportedSignals `json:"supported_signals"`
DataCollected DataCollected `json:"data_collected"`
Strategy *CollectionStrategy `json:"telemetry_collection_strategy"`
}
type Assets struct {
Dashboards []Dashboard `json:"dashboards"`
}
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
type CollectedLogAttribute struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
type CollectedMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Unit string `json:"unit"`
Description string `json:"description"`
}
type CollectionStrategy struct {
Provider string `json:"provider"`
AWSMetrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
AWSLogs *AWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type
}
type AWSMetricsStrategy struct {
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
StreamFilters []struct {
// json tags here are in the shape expected by AWS API as detailed at
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
}
type AWSLogsStrategy struct {
Subscriptions []struct {
// subscribe to all logs groups with specified prefix.
// eg: `/aws/rds/`
LogGroupNamePrefix string `json:"log_group_name_prefix"`
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
// "" implies no filtering is required.
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
}
type Dashboard struct {
Id string `json:"id"`
Url string `json:"url"`
Title string `json:"title"`
Description string `json:"description"`
Image string `json:"image"`
Definition *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}

View File

@@ -2,128 +2,110 @@ package services
import (
"bytes"
"context"
"embed"
"encoding/json"
"fmt"
"io/fs"
"path"
"sort"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
koanfJson "github.com/knadh/koanf/parsers/json"
"golang.org/x/exp/maps"
)
const (
S3Sync = "s3sync"
)
var (
CodeUnsupportedCloudProvider = errors.MustNewCode("unsupported_cloud_provider")
CodeUnsupportedServiceType = errors.MustNewCode("unsupported_service_type")
CodeServiceDefinitionNotFound = errors.MustNewCode("service_definition_not_found")
)
func List(cloudProvider string) ([]Definition, *model.ApiError) {
cloudServices, found := supportedServices[cloudProvider]
if !found || cloudServices == nil {
return nil, model.NotFoundError(fmt.Errorf(
"unsupported cloud provider: %s", cloudProvider,
))
}
services := maps.Values(cloudServices)
sort.Slice(services, func(i, j int) bool {
return services[i].Id < services[j].Id
})
return services, nil
type ServicesProvider[T integrationstypes.Definition] struct {
definitions map[string]T
}
func Map(cloudProvider string) (map[string]Definition, error) {
cloudServices, found := supportedServices[cloudProvider]
if !found || cloudServices == nil {
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
func (a *ServicesProvider[T]) ListServiceDefinitions(ctx context.Context) (map[string]T, error) {
return a.definitions, nil
}
func (a *ServicesProvider[T]) GetServiceDefinition(ctx context.Context, serviceName string) (T, error) {
def, ok := a.definitions[serviceName]
if !ok {
return *new(T), errors.NewNotFoundf(CodeServiceDefinitionNotFound, "service definition not found: %s", serviceName)
}
return def, nil
}
func NewAWSCloudProviderServices() (*ServicesProvider[*integrationstypes.AWSDefinition], error) {
definitions, err := readAllServiceDefinitions(integrationstypes.CloudProviderAWS)
if err != nil {
return nil, err
}
serviceDefinitions := make(map[string]*integrationstypes.AWSDefinition)
for id, def := range definitions {
typedDef, ok := def.(*integrationstypes.AWSDefinition)
if !ok {
return nil, fmt.Errorf("invalid type for AWS service definition %s", id)
}
serviceDefinitions[id] = typedDef
}
return &ServicesProvider[*integrationstypes.AWSDefinition]{
definitions: serviceDefinitions,
}, nil
}
func NewAzureCloudProviderServices() (*ServicesProvider[*integrationstypes.AzureDefinition], error) {
definitions, err := readAllServiceDefinitions(integrationstypes.CloudProviderAzure)
if err != nil {
return nil, err
}
serviceDefinitions := make(map[string]*integrationstypes.AzureDefinition)
for id, def := range definitions {
typedDef, ok := def.(*integrationstypes.AzureDefinition)
if !ok {
return nil, fmt.Errorf("invalid type for Azure service definition %s", id)
}
serviceDefinitions[id] = typedDef
}
return &ServicesProvider[*integrationstypes.AzureDefinition]{
definitions: serviceDefinitions,
}, nil
}
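
The generic keeps each provider's definitions strongly typed at the call site; a minimal usage sketch (the "blobstorage" id assumes a matching definition directory):

// Sketch: no type assertions needed once the provider is constructed.
azureSvcs, err := NewAzureCloudProviderServices()
if err != nil {
	return err
}
def, err := azureSvcs.GetServiceDefinition(ctx, "blobstorage")
if err != nil {
	return err
}
// def is already a *integrationstypes.AzureDefinition.
_ = def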
// End of API. Logic for reading service definition files follows
//go:embed definitions/*
var definitionFiles embed.FS
func readAllServiceDefinitions(cloudProvider valuer.String) (map[string]any, error) {
rootDirName := "definitions"
cloudProviderDirPath := path.Join(rootDirName, cloudProvider.String())
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
if err != nil {
return nil, err
}
if len(cloudServices) < 1 {
return nil, errors.NewInternalf(errors.CodeInternal, "no service definitions found in %s", cloudProviderDirPath)
}
return cloudServices, nil
}
func GetServiceDefinition(cloudProvider, serviceType string) (*Definition, error) {
cloudServices := supportedServices[cloudProvider]
if cloudServices == nil {
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
}
svc, exists := cloudServices[serviceType]
if !exists {
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedServiceType, "%s service not found: %s", cloudProvider, serviceType)
}
return &svc, nil
}
// End of API. Logic for reading service definition files follows
// Service details read from ./serviceDefinitions
// { "providerName": { "service_id": {...}} }
var supportedServices map[string]map[string]Definition
func init() {
err := readAllServiceDefinitions()
if err != nil {
panic(fmt.Errorf(
"couldn't read cloud service definitions: %w", err,
))
}
}
//go:embed definitions/*
var definitionFiles embed.FS
func readAllServiceDefinitions() error {
supportedServices = map[string]map[string]Definition{}
rootDirName := "definitions"
cloudProviderDirs, err := fs.ReadDir(definitionFiles, rootDirName)
if err != nil {
return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
}
for _, d := range cloudProviderDirs {
if !d.IsDir() {
continue
}
cloudProvider := d.Name()
cloudProviderDirPath := path.Join(rootDirName, cloudProvider)
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
if err != nil {
return fmt.Errorf("couldn't read %s service definitions: %w", cloudProvider, err)
}
if len(cloudServices) < 1 {
return fmt.Errorf("no %s services could be read", cloudProvider)
}
supportedServices[cloudProvider] = cloudServices
}
return nil
}
func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath string) (
map[string]Definition, error,
) {
func readServiceDefinitionsFromDir(cloudProvider valuer.String, cloudProviderDirPath string) (map[string]any, error) {
svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
if err != nil {
return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't list integrations dirs")
}
svcDefs := map[string]Definition{}
svcDefs := make(map[string]any)
for _, d := range svcDefDirs {
if !d.IsDir() {
@@ -133,103 +115,73 @@ func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath st
svcDirPath := path.Join(cloudProviderDirPath, d.Name())
s, err := readServiceDefinition(cloudProvider, svcDirPath)
if err != nil {
return nil, fmt.Errorf("couldn't read svc definition for %s: %w", d.Name(), err)
return nil, err
}
_, exists := svcDefs[s.Id]
_, exists := svcDefs[s.GetId()]
if exists {
return nil, fmt.Errorf(
"duplicate service definition for id %s at %s", s.Id, d.Name(),
)
return nil, errors.NewInternalf(errors.CodeInternal, "duplicate service definition for id %s at %s", s.GetId(), d.Name())
}
svcDefs[s.Id] = *s
svcDefs[s.GetId()] = s
}
return svcDefs, nil
}
func readServiceDefinition(cloudProvider string, svcDirpath string) (*Definition, error) {
func readServiceDefinition(cloudProvider valuer.String, svcDirpath string) (integrationstypes.Definition, error) {
integrationJsonPath := path.Join(svcDirpath, "integration.json")
serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
if err != nil {
return nil, fmt.Errorf(
"couldn't find integration.json in %s: %w",
svcDirpath, err,
)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read integration definition in %s", svcDirpath)
}
integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
if err != nil {
return nil, fmt.Errorf(
"couldn't parse integration.json from %s: %w",
integrationJsonPath, err,
)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't parse integration definition in %s", svcDirpath)
}
hydrated, err := integrations.HydrateFileUris(
integrationSpec, definitionFiles, svcDirpath,
)
hydrated, err := integrations.HydrateFileUris(integrationSpec, definitionFiles, svcDirpath)
if err != nil {
return nil, fmt.Errorf(
"couldn't hydrate files referenced in service definition %s: %w",
integrationJsonPath, err,
)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't hydrate integration definition in %s", svcDirpath)
}
hydratedSpec := hydrated.(map[string]any)
serviceDef, err := ParseStructWithJsonTagsFromMap[Definition](hydratedSpec)
if err != nil {
return nil, fmt.Errorf(
"couldn't parse hydrated JSON spec read from %s: %w",
integrationJsonPath, err,
)
var serviceDef integrationstypes.Definition
switch cloudProvider {
case integrationstypes.CloudProviderAWS:
serviceDef = &integrationstypes.AWSDefinition{}
case integrationstypes.CloudProviderAzure:
serviceDef = &integrationstypes.AzureDefinition{}
default:
// ideally this shouldn't happen, hence the internal error
return nil, errors.NewInternalf(errors.CodeInternal, "unsupported cloud provider: %s", cloudProvider)
}
err = validateServiceDefinition(serviceDef)
err = parseStructWithJsonTagsFromMap(hydratedSpec, serviceDef)
if err != nil {
return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
return nil, err
}
err = serviceDef.Validate()
if err != nil {
return nil, err
}
serviceDef.Strategy.Provider = cloudProvider
return serviceDef, nil
}
func validateServiceDefinition(s *Definition) error {
// Validate dashboard data
seenDashboardIds := map[string]interface{}{}
for _, dd := range s.Assets.Dashboards {
if _, seen := seenDashboardIds[dd.Id]; seen {
return fmt.Errorf("multiple dashboards found with id %s", dd.Id)
}
seenDashboardIds[dd.Id] = nil
}
if s.Strategy == nil {
return fmt.Errorf("telemetry_collection_strategy is required")
}
// potentially more to follow
return nil
}
func ParseStructWithJsonTagsFromMap[StructType any](data map[string]any) (
*StructType, error,
) {
func parseStructWithJsonTagsFromMap(data map[string]any, target interface{}) error {
mapJson, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("couldn't marshal map to json: %w", err)
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal service definition json data")
}
var res StructType
decoder := json.NewDecoder(bytes.NewReader(mapJson))
decoder.DisallowUnknownFields()
err = decoder.Decode(&res)
err = decoder.Decode(target)
if err != nil {
return nil, fmt.Errorf("couldn't unmarshal json back to struct: %w", err)
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal service definition json data")
}
return &res, nil
return nil
}
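
The DisallowUnknownFields decoder is what turns a typo in a definition file into a load-time error rather than a silently dropped field; a standalone illustration:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type spec struct {
	Id    string `json:"id"`
	Title string `json:"title"`
}

func main() {
	// "titel" is a deliberate typo; strict decoding rejects it.
	data := []byte(`{"id": "blobstorage", "titel": "Blob Storage"}`)
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields()
	var s spec
	if err := dec.Decode(&s); err != nil {
		fmt.Println("rejected:", err) // json: unknown field "titel"
	}
}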

View File

@@ -1,35 +1,3 @@
package services
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/stretchr/testify/require"
)
func TestAvailableServices(t *testing.T) {
require := require.New(t)
// should be able to list available services.
_, apiErr := List("bad-cloud-provider")
require.NotNil(apiErr)
require.Equal(model.ErrorNotFound, apiErr.Type())
awsSvcs, apiErr := List("aws")
require.Nil(apiErr)
require.Greater(len(awsSvcs), 0)
// should be able to get details of a service
_, err := GetServiceDefinition(
"aws", "bad-service-id",
)
require.NotNil(err)
require.True(errors.Ast(err, errors.TypeNotFound))
svc, err := GetServiceDefinition(
"aws", awsSvcs[0].Id,
)
require.Nil(err)
require.Equal(*svc, awsSvcs[0])
}
// TODO: add more tests for services package

View File

@@ -1,55 +1,57 @@
package cloudintegrations
package store
import (
"context"
"database/sql"
"fmt"
"log/slog"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type cloudProviderAccountsRepository interface {
listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)
var (
CodeCloudIntegrationAccountNotFound errors.Code = errors.MustNewCode("cloud_integration_account_not_found")
)
get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)
type CloudProviderAccountsRepository interface {
ListConnected(ctx context.Context, orgId string, provider string) ([]integrationstypes.CloudIntegration, error)
getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)
Get(ctx context.Context, orgId string, provider string, id string) (*integrationstypes.CloudIntegration, error)
GetConnectedCloudAccount(ctx context.Context, orgId, provider string, accountID string) (*integrationstypes.CloudIntegration, error)
// Insert an account or update it by (cloudProvider, id)
// for specified non-empty fields
upsert(
Upsert(
ctx context.Context,
orgId string,
provider string,
id *string,
config *types.AccountConfig,
config []byte,
accountId *string,
agentReport *types.AgentReport,
agentReport *integrationstypes.AgentReport,
removedAt *time.Time,
) (*types.CloudIntegration, *model.ApiError)
) (*integrationstypes.CloudIntegration, error)
}
func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
*cloudProviderAccountsSQLRepository, error,
) {
return &cloudProviderAccountsSQLRepository{
store: store,
}, nil
func NewCloudProviderAccountsRepository(store sqlstore.SQLStore) CloudProviderAccountsRepository {
return &cloudProviderAccountsSQLRepository{store: store}
}
type cloudProviderAccountsSQLRepository struct {
store sqlstore.SQLStore
}
func (r *cloudProviderAccountsSQLRepository) listConnected(
func (r *cloudProviderAccountsSQLRepository) ListConnected(
ctx context.Context, orgId string, cloudProvider string,
) ([]types.CloudIntegration, *model.ApiError) {
accounts := []types.CloudIntegration{}
) ([]integrationstypes.CloudIntegration, error) {
accounts := []integrationstypes.CloudIntegration{}
err := r.store.BunDB().NewSelect().
Model(&accounts).
@@ -62,18 +64,17 @@ func (r *cloudProviderAccountsSQLRepository) listConnected(
Scan(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not query connected cloud accounts: %w", err,
))
slog.ErrorContext(ctx, "error querying connected cloud accounts", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not query connected cloud accounts")
}
return accounts, nil
}
func (r *cloudProviderAccountsSQLRepository) get(
func (r *cloudProviderAccountsSQLRepository) Get(
ctx context.Context, orgId string, provider string, id string,
) (*types.CloudIntegration, *model.ApiError) {
var result types.CloudIntegration
) (*integrationstypes.CloudIntegration, error) {
var result integrationstypes.CloudIntegration
err := r.store.BunDB().NewSelect().
Model(&result).
@@ -82,23 +83,25 @@ func (r *cloudProviderAccountsSQLRepository) get(
Where("id = ?", id).
Scan(ctx)
if err == sql.ErrNoRows {
return nil, model.NotFoundError(fmt.Errorf(
"couldn't find account with Id %s", id,
))
} else if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud provider accounts: %w", err,
))
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(
err,
CodeCloudIntegrationAccountNotFound,
"couldn't find account with Id %s", id,
)
}
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
}
return &result, nil
}
func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
func (r *cloudProviderAccountsSQLRepository) GetConnectedCloudAccount(
ctx context.Context, orgId string, provider string, accountId string,
) (*types.CloudIntegration, *model.ApiError) {
var result types.CloudIntegration
) (*integrationstypes.CloudIntegration, error) {
var result integrationstypes.CloudIntegration
err := r.store.BunDB().NewSelect().
Model(&result).
@@ -109,29 +112,25 @@ func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
Where("removed_at is NULL").
Scan(ctx)
if err == sql.ErrNoRows {
return nil, model.NotFoundError(fmt.Errorf(
"couldn't find connected cloud account %s", accountId,
))
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(err, CodeCloudIntegrationAccountNotFound, "couldn't find connected cloud account %s", accountId)
} else if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud provider accounts: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
}
return &result, nil
}
func (r *cloudProviderAccountsSQLRepository) upsert(
func (r *cloudProviderAccountsSQLRepository) Upsert(
ctx context.Context,
orgId string,
provider string,
id *string,
config *types.AccountConfig,
config []byte,
accountId *string,
agentReport *types.AgentReport,
agentReport *integrationstypes.AgentReport,
removedAt *time.Time,
) (*types.CloudIntegration, *model.ApiError) {
) (*integrationstypes.CloudIntegration, error) {
// Insert
if id == nil {
temp := valuer.GenerateUUID().StringValue()
@@ -181,7 +180,7 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
)
}
integration := types.CloudIntegration{
integration := integrationstypes.CloudIntegration{
OrgID: orgId,
Provider: provider,
Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
@@ -189,28 +188,25 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Config: config,
Config: string(config),
AccountID: accountId,
LastAgentReport: agentReport,
RemovedAt: removedAt,
}
_, dbErr := r.store.BunDB().NewInsert().
_, err := r.store.BunDB().NewInsert().
Model(&integration).
On(onConflictClause).
Exec(ctx)
if dbErr != nil {
return nil, model.InternalError(fmt.Errorf(
"could not upsert cloud account record: %w", dbErr,
))
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud integration account")
}
upsertedAccount, apiErr := r.get(ctx, orgId, provider, *id)
if apiErr != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't fetch upserted account by id: %w", apiErr.ToError(),
))
upsertedAccount, err := r.Get(ctx, orgId, provider, *id)
if err != nil {
slog.ErrorContext(ctx, "error upserting cloud integration account", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't get upserted cloud integration account")
}
return upsertedAccount, nil

View File

@@ -1,64 +1,63 @@
package cloudintegrations
package store
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
CodeServiceConfigNotFound = errors.MustNewCode("service_config_not_found")
)
type ServiceConfigDatabase interface {
get(
Get(
ctx context.Context,
orgID string,
cloudAccountId string,
serviceType string,
) (*types.CloudServiceConfig, *model.ApiError)
) ([]byte, error)
upsert(
Upsert(
ctx context.Context,
orgID string,
cloudProvider string,
cloudAccountId string,
serviceId string,
config types.CloudServiceConfig,
) (*types.CloudServiceConfig, *model.ApiError)
config []byte,
) ([]byte, error)
getAllForAccount(
GetAllForAccount(
ctx context.Context,
orgID string,
cloudAccountId string,
) (
configsBySvcId map[string]*types.CloudServiceConfig,
apiErr *model.ApiError,
map[string][]byte,
error,
)
}
func newServiceConfigRepository(store sqlstore.SQLStore) (
*serviceConfigSQLRepository, error,
) {
return &serviceConfigSQLRepository{
store: store,
}, nil
func NewServiceConfigRepository(store sqlstore.SQLStore) ServiceConfigDatabase {
return &serviceConfigSQLRepository{store: store}
}
type serviceConfigSQLRepository struct {
store sqlstore.SQLStore
}
func (r *serviceConfigSQLRepository) get(
func (r *serviceConfigSQLRepository) Get(
ctx context.Context,
orgID string,
cloudAccountId string,
serviceType string,
) (*types.CloudServiceConfig, *model.ApiError) {
var result types.CloudIntegrationService
) ([]byte, error) {
var result integrationstypes.CloudIntegrationService
err := r.store.BunDB().NewSelect().
Model(&result).
@@ -67,36 +66,30 @@ func (r *serviceConfigSQLRepository) get(
Where("ci.id = ?", cloudAccountId).
Where("cis.type = ?", serviceType).
Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(err, CodeServiceConfigNotFound, "couldn't find config for cloud account %s", cloudAccountId)
}
if err == sql.ErrNoRows {
return nil, model.NotFoundError(fmt.Errorf(
"couldn't find config for cloud account %s",
cloudAccountId,
))
} else if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud service config: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud service config")
}
return &result.Config, nil
return []byte(result.Config), nil
}
func (r *serviceConfigSQLRepository) upsert(
func (r *serviceConfigSQLRepository) Upsert(
ctx context.Context,
orgID string,
cloudProvider string,
cloudAccountId string,
serviceId string,
config types.CloudServiceConfig,
) (*types.CloudServiceConfig, *model.ApiError) {
config []byte,
) ([]byte, error) {
// get cloud integration id from account id
// if the account is not connected, we don't need to upsert the config
var cloudIntegrationId string
err := r.store.BunDB().NewSelect().
Model((*types.CloudIntegration)(nil)).
Model((*integrationstypes.CloudIntegration)(nil)).
Column("id").
Where("provider = ?", cloudProvider).
Where("account_id = ?", cloudAccountId).
@@ -104,20 +97,24 @@ func (r *serviceConfigSQLRepository) upsert(
Where("removed_at is NULL").
Where("last_agent_report is not NULL").
Scan(ctx, &cloudIntegrationId)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud integration id: %w", err,
))
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(
err,
CodeCloudIntegrationAccountNotFound,
"couldn't find active cloud integration account",
)
}
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud integration id")
}
serviceConfig := types.CloudIntegrationService{
serviceConfig := integrationstypes.CloudIntegrationService{
Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Config: config,
Config: string(config),
Type: serviceId,
CloudIntegrationID: cloudIntegrationId,
}
@@ -126,21 +123,18 @@ func (r *serviceConfigSQLRepository) upsert(
On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
Exec(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not upsert cloud service config: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud service config")
}
return &serviceConfig.Config, nil
return config, nil
}
func (r *serviceConfigSQLRepository) getAllForAccount(
func (r *serviceConfigSQLRepository) GetAllForAccount(
ctx context.Context,
orgID string,
cloudAccountId string,
) (map[string]*types.CloudServiceConfig, *model.ApiError) {
serviceConfigs := []types.CloudIntegrationService{}
) (map[string][]byte, error) {
var serviceConfigs []integrationstypes.CloudIntegrationService
err := r.store.BunDB().NewSelect().
Model(&serviceConfigs).
@@ -149,15 +143,13 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
Where("ci.org_id = ?", orgID).
Scan(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not query service configs from db: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query service configs from db")
}
result := map[string]*types.CloudServiceConfig{}
result := make(map[string][]byte)
for _, r := range serviceConfigs {
result[r.Type] = &r.Config
result[r.Type] = []byte(r.Config)
}
return result, nil
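
With the repository now trafficking in raw bytes, each provider owns (de)serialization of its own config shape; a round-trip sketch with a hypothetical config type:

// Sketch: the repo stores []byte; the provider decides the schema.
type logsConfig struct { // hypothetical provider-side shape
	Enabled bool `json:"enabled"`
}

raw, err := json.Marshal(&logsConfig{Enabled: true})
if err != nil {
	return err
}
if _, err := repo.Upsert(ctx, orgID, "azure", accountID, "blobstorage", raw); err != nil {
	return err
}

stored, err := repo.Get(ctx, orgID, accountID, "blobstorage")
if err != nil {
	return err
}
var cfg logsConfig
if err := json.Unmarshal(stored, &cfg); err != nil {
	return err
}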

View File

@@ -6,11 +6,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
"github.com/SigNoz/signoz/pkg/queryparser"
"log/slog"
"io"
"math"
@@ -25,14 +21,19 @@ import (
"time"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/errors"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
@@ -44,7 +45,6 @@ import (
"github.com/SigNoz/signoz/pkg/contextlinks"
traceFunnelsModule "github.com/SigNoz/signoz/pkg/modules/tracefunnel"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
"github.com/SigNoz/signoz/pkg/query-service/app/logs"
@@ -113,7 +113,7 @@ type APIHandler struct {
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
cloudIntegrationsRegistry map[integrationstypes.CloudProviderType]integrationstypes.CloudProvider
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
@@ -162,9 +162,6 @@ type APIHandlerOpts struct {
// Integrations
IntegrationsController *integrations.Controller
// Cloud Provider Integrations
CloudIntegrationsController *cloudintegrations.Controller
// Log parsing pipelines
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
@@ -180,6 +177,8 @@ type APIHandlerOpts struct {
QueryParserAPI *queryparser.API
Signoz *signoz.SigNoz
Logger *slog.Logger
}
// NewAPIHandler returns an APIHandler
@@ -215,12 +214,18 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
summaryService := metricsexplorer.NewSummaryService(opts.Reader, opts.RuleManager, opts.Signoz.Modules.Dashboard)
//quickFilterModule := quickfilter.NewAPI(opts.QuickFilterModule)
cloudIntegrationsRegistry := cloudintegrations.NewCloudProviderRegistry(
opts.Logger,
opts.Signoz.SQLStore,
opts.Signoz.Querier,
)
aH := &APIHandler{
reader: opts.Reader,
temporalityMap: make(map[string]map[v3.Temporality]bool),
ruleManager: opts.RuleManager,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
cloudIntegrationsRegistry: cloudIntegrationsRegistry,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
querier: querier,
querierV2: querierv2,
@@ -1217,13 +1222,22 @@ func (aH *APIHandler) Get(rw http.ResponseWriter, r *http.Request) {
}
dashboard := new(dashboardtypes.Dashboard)
if aH.CloudIntegrationsController.IsCloudIntegrationDashboardUuid(id) {
cloudIntegrationDashboard, apiErr := aH.CloudIntegrationsController.GetDashboardById(ctx, orgID, id)
if apiErr != nil {
render.Error(rw, errorsV2.Wrapf(apiErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to get dashboard"))
if integrationstypes.IsCloudIntegrationDashboardUuid(id) {
cloudProvider, err := integrationstypes.GetCloudProviderFromDashboardID(id)
if err != nil {
render.Error(rw, err)
return
}
dashboard = cloudIntegrationDashboard
integrationDashboard, err := aH.cloudIntegrationsRegistry[cloudProvider].GetDashboard(ctx, &integrationstypes.GettableDashboard{
ID: id,
OrgID: orgID,
})
if err != nil {
render.Error(rw, err)
return
}
dashboard = integrationDashboard
} else if aH.IntegrationsController.IsInstalledIntegrationDashboardID(id) {
integrationDashboard, apiErr := aH.IntegrationsController.GetInstalledIntegrationDashboardById(ctx, orgID, id)
if apiErr != nil {
@@ -1287,11 +1301,13 @@ func (aH *APIHandler) List(rw http.ResponseWriter, r *http.Request) {
dashboards = append(dashboards, installedIntegrationDashboards...)
}
cloudIntegrationDashboards, apiErr := aH.CloudIntegrationsController.AvailableDashboards(ctx, orgID)
if apiErr != nil {
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
} else {
dashboards = append(dashboards, cloudIntegrationDashboards...)
for _, provider := range aH.cloudIntegrationsRegistry {
cloudIntegrationDashboards, err := provider.GetAvailableDashboards(ctx, orgID)
if err != nil {
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(err))
} else {
dashboards = append(dashboards, cloudIntegrationDashboards...)
}
}
gettableDashboards, err := dashboardtypes.NewGettableDashboardsFromDashboards(dashboards)
@@ -3267,15 +3283,15 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(w http.ResponseWriter, r *h
lookbackSeconds = 15 * 60
}
connectionStatus, apiErr := aH.calculateConnectionStatus(
connectionStatus, err := aH.calculateConnectionStatus(
r.Context(), orgID, connectionTests, lookbackSeconds,
)
if apiErr != nil {
RespondError(w, apiErr, "Failed to calculate integration connection status")
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, connectionStatus)
render.Success(w, http.StatusOK, connectionStatus)
}
func (aH *APIHandler) calculateConnectionStatus(
@@ -3283,10 +3299,11 @@ func (aH *APIHandler) calculateConnectionStatus(
orgID valuer.UUID,
connectionTests *integrations.IntegrationConnectionTests,
lookbackSeconds int64,
) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
) (*integrations.IntegrationConnectionStatus, error) {
// Calculate connection status for signals in parallel
result := &integrations.IntegrationConnectionStatus{}
// TODO: migrate to errors package
errors := []*model.ApiError{}
var resultLock sync.Mutex
@@ -3484,12 +3501,14 @@ func (aH *APIHandler) UninstallIntegration(w http.ResponseWriter, r *http.Reques
aH.Respond(w, map[string]interface{}{})
}
// cloud provider integrations
// RegisterCloudIntegrationsRoutes register routes for cloud provider integrations
func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
subRouter := router.PathPrefix("/api/v1/cloud-integrations").Subrouter()
subRouter.Use(middleware.NewRecovery(aH.Signoz.Instrumentation.Logger()).Wrap)
subRouter.HandleFunc(
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionUrl),
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionArtifact),
).Methods(http.MethodPost)
subRouter.HandleFunc(
@@ -3523,170 +3542,199 @@ func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *mi
subRouter.HandleFunc(
"/{cloudProvider}/services/{serviceId}/config", am.EditAccess(aH.CloudIntegrationsUpdateServiceConfig),
).Methods(http.MethodPost)
}
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
r.Context(), claims.OrgID, cloudProvider,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, resp)
}
func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
req := cloudintegrations.GenerateConnectionUrlRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
r.Context(), claims.OrgID, cloudProvider, req,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, result)
}
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
accountId := mux.Vars(r)["accountId"]
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
r.Context(), claims.OrgID, cloudProvider, accountId,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, resp)
}
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
req := cloudintegrations.AgentCheckInRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
result, err := aH.CloudIntegrationsController.CheckInAsAgent(
r.Context(), claims.OrgID, cloudProvider, req,
)
func (aH *APIHandler) CloudIntegrationsGenerateConnectionArtifact(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, result)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
return
}
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GenerateConnectionArtifact(r.Context(), &integrationstypes.PostableConnectionArtifact{
OrgID: claims.OrgID,
Data: reqBody,
})
if err != nil {
aH.Signoz.Instrumentation.Logger().ErrorContext(r.Context(),
"failed to generate connection artifact for cloud integration",
slog.String("cloudProvider", cloudProviderString),
slog.String("orgID", claims.OrgID),
slog.Any("error", err),
)
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListConnectedAccounts(r.Context(), claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
accountId := mux.Vars(r)["accountId"]
req := cloudintegrations.UpdateAccountConfigRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetAccountStatus(r.Context(), claims.OrgID, accountId)
if err != nil {
render.Error(w, err)
return
}
result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
r.Context(), claims.OrgID, cloudProvider, accountId, req,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, result)
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
req := new(integrationstypes.PostableAgentCheckInPayload)
if err = json.NewDecoder(r.Body).Decode(req); err != nil {
render.Error(w, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid request body"))
return
}
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
req.OrgID = claims.OrgID
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].AgentCheckIn(r.Context(), req)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
accountId := mux.Vars(r)["accountId"]
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
return
}
result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
r.Context(), claims.OrgID, cloudProvider, accountId,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateAccountConfig(r.Context(), &integrationstypes.PatchableAccountConfig{
OrgID: claims.OrgID,
AccountId: accountId,
Data: reqBody,
})
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, result)
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsListServices(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
accountId := mux.Vars(r)["accountId"]
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
result, err := aH.cloudIntegrationsRegistry[cloudProvider].DisconnectAccount(r.Context(), claims.OrgID, accountId)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, result)
}
func (aH *APIHandler) CloudIntegrationsListServices(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
var cloudAccountId *string
@@ -3695,26 +3743,22 @@ func (aH *APIHandler) CloudIntegrationsListServices(
cloudAccountId = &cloudAccountIdQP
}
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListServices(r.Context(), claims.OrgID, cloudAccountId)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
@@ -3726,7 +3770,14 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
return
}
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
serviceId := mux.Vars(r)["serviceId"]
var cloudAccountId *string
@@ -3736,270 +3787,59 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
cloudAccountId = &cloudAccountIdQP
}
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetServiceDetails(r.Context(), &integrationstypes.GetServiceDetailsReq{
OrgID: orgID,
ServiceId: serviceId,
CloudAccountID: cloudAccountId,
})
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
ctx context.Context,
orgID valuer.UUID,
cloudProvider string,
cloudAccountId string,
svcDetails *cloudintegrations.ServiceDetails,
) (*cloudintegrations.ServiceConnectionStatus, *model.ApiError) {
if cloudProvider != "aws" {
// TODO(Raj): Make connection check generic for all providers in a follow up change
return nil, model.BadRequest(
fmt.Errorf("unsupported cloud provider: %s", cloudProvider),
)
}
telemetryCollectionStrategy := svcDetails.Strategy
if telemetryCollectionStrategy == nil {
return nil, model.InternalError(fmt.Errorf(
"service doesn't have telemetry collection strategy: %s", svcDetails.Id,
))
}
result := &cloudintegrations.ServiceConnectionStatus{}
errors := []*model.ApiError{}
var resultLock sync.Mutex
var wg sync.WaitGroup
// Calculate metrics connection status
if telemetryCollectionStrategy.AWSMetrics != nil {
wg.Add(1)
go func() {
defer wg.Done()
metricsConnStatus, apiErr := aH.calculateAWSIntegrationSvcMetricsConnectionStatus(
ctx, cloudAccountId, telemetryCollectionStrategy.AWSMetrics, svcDetails.DataCollected.Metrics,
)
resultLock.Lock()
defer resultLock.Unlock()
if apiErr != nil {
errors = append(errors, apiErr)
} else {
result.Metrics = metricsConnStatus
}
}()
}
// Calculate logs connection status
if telemetryCollectionStrategy.AWSLogs != nil {
wg.Add(1)
go func() {
defer wg.Done()
logsConnStatus, apiErr := aH.calculateAWSIntegrationSvcLogsConnectionStatus(
ctx, orgID, cloudAccountId, telemetryCollectionStrategy.AWSLogs,
)
resultLock.Lock()
defer resultLock.Unlock()
if apiErr != nil {
errors = append(errors, apiErr)
} else {
result.Logs = logsConnStatus
}
}()
}
wg.Wait()
if len(errors) > 0 {
return nil, errors[0]
}
return result, nil
}
func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
ctx context.Context,
cloudAccountId string,
strategy *services.AWSMetricsStrategy,
metricsCollectedBySvc []services.CollectedMetric,
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
if strategy == nil || len(strategy.StreamFilters) < 1 {
return nil, nil
}
expectedLabelValues := map[string]string{
"cloud_provider": "aws",
"cloud_account_id": cloudAccountId,
}
metricsNamespace := strategy.StreamFilters[0].Namespace
metricsNamespaceParts := strings.Split(metricsNamespace, "/")
if len(metricsNamespaceParts) >= 2 {
expectedLabelValues["service_namespace"] = metricsNamespaceParts[0]
expectedLabelValues["service_name"] = metricsNamespaceParts[1]
} else {
// metrics for single word namespaces like "CWAgent" do not
// have the service_namespace label populated
expectedLabelValues["service_name"] = metricsNamespaceParts[0]
}
metricNamesCollectedBySvc := []string{}
for _, cm := range metricsCollectedBySvc {
metricNamesCollectedBySvc = append(metricNamesCollectedBySvc, cm.Name)
}
statusForLastReceivedMetric, apiErr := aH.reader.GetLatestReceivedMetric(
ctx, metricNamesCollectedBySvc, expectedLabelValues,
)
if apiErr != nil {
return nil, apiErr
}
if statusForLastReceivedMetric != nil {
return &cloudintegrations.SignalConnectionStatus{
LastReceivedTsMillis: statusForLastReceivedMetric.LastReceivedTsMillis,
LastReceivedFrom: "signoz-aws-integration",
}, nil
}
return nil, nil
}
func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
ctx context.Context,
orgID valuer.UUID,
cloudAccountId string,
strategy *services.AWSLogsStrategy,
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
if strategy == nil || len(strategy.Subscriptions) < 1 {
return nil, nil
}
logGroupNamePrefix := strategy.Subscriptions[0].LogGroupNamePrefix
if len(logGroupNamePrefix) < 1 {
return nil, nil
}
logsConnTestFilter := &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: "cloud.account.id",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: "=",
Value: cloudAccountId,
},
{
Key: v3.AttributeKey{
Key: "aws.cloudwatch.log_group_name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: "like",
Value: logGroupNamePrefix + "%",
},
},
}
// TODO(Raj): Receive this as a param from UI in the future.
lookbackSeconds := int64(30 * 60)
qrParams := &v3.QueryRangeParamsV3{
Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
End: time.Now().UnixMilli(),
CompositeQuery: &v3.CompositeQuery{
PanelType: v3.PanelTypeList,
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
PageSize: 1,
Filters: logsConnTestFilter,
QueryName: "A",
DataSource: v3.DataSourceLogs,
Expression: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
},
},
},
}
queryRes, _, err := aH.querier.QueryRange(
ctx, orgID, qrParams,
)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not query for integration connection status: %w", err,
))
}
if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
lastLog := queryRes[0].List[0]
return &cloudintegrations.SignalConnectionStatus{
LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
LastReceivedFrom: "signoz-aws-integration",
}, nil
}
return nil, nil
}
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationstypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
serviceId := mux.Vars(r)["serviceId"]
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err,
errors.CodeInternal,
"failed to read update service config request body",
))
return
}
result, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateServiceConfig(
r.Context(), &integrationstypes.PatchableServiceConfig{
OrgID: claims.OrgID,
ServiceId: serviceId,
Config: reqBody,
},
)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, result)
}
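For context, a minimal sketch of what a provider implementation might do with the raw Config bytes handed over here; the awsProvider receiver and the serviceDefinition lookup are illustrative assumptions, not code from this change:

// Illustrative only: decode the raw patch into the AWS-typed form and
// validate it against the service definition before persisting.
func (p *awsProvider) UpdateServiceConfig(ctx context.Context, req *integrationstypes.PatchableServiceConfig) (any, error) {
	patch := new(integrationstypes.PatchableAWSCloudServiceConfig)
	if err := integrationstypes.UnmarshalJSON(req.Config, patch); err != nil {
		return nil, err
	}
	def, err := p.serviceDefinition(ctx, req.ServiceId) // hypothetical definition lookup
	if err != nil {
		return nil, err
	}
	if err := patch.Config.Validate(def); err != nil {
		return nil, err
	}
	return &integrationstypes.PatchServiceConfigResponse{ServiceId: req.ServiceId, Config: patch.Config}, nil
}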
// logs

View File

@@ -11,6 +11,7 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
@@ -107,7 +108,7 @@ type IntegrationsListItem struct {
type Integration struct {
IntegrationDetails
Installation *integrationstypes.InstalledIntegration `json:"installation"`
}
type Manager struct {
@@ -223,7 +224,7 @@ func (m *Manager) InstallIntegration(
ctx context.Context,
orgId string,
integrationId string,
config integrationstypes.InstalledIntegrationConfig,
) (*IntegrationsListItem, *model.ApiError) {
integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
if apiErr != nil {
@@ -429,7 +430,7 @@ func (m *Manager) getInstalledIntegration(
ctx context.Context,
orgId string,
integrationId string,
) (*integrationstypes.InstalledIntegration, *model.ApiError) {
iis, apiErr := m.installedIntegrationsRepo.get(
ctx, orgId, []string{integrationId},
)
@@ -457,7 +458,7 @@ func (m *Manager) getInstalledIntegrations(
return nil, apiErr
}
installedTypes := utils.MapSlice(installations, func(i integrationstypes.InstalledIntegration) string {
return i.Type
})
integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)

View File

@@ -4,22 +4,22 @@ import (
"context"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
)
type InstalledIntegrationsRepo interface {
list(ctx context.Context, orgId string) ([]integrationstypes.InstalledIntegration, *model.ApiError)
get(
ctx context.Context, orgId string, integrationTypes []string,
) (map[string]integrationstypes.InstalledIntegration, *model.ApiError)
upsert(
ctx context.Context,
orgId string,
integrationType string,
config integrationstypes.InstalledIntegrationConfig,
) (*integrationstypes.InstalledIntegration, *model.ApiError)
delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationstypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -26,8 +27,8 @@ func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
func (r *InstalledIntegrationsSqliteRepo) list(
ctx context.Context,
orgId string,
) ([]integrationstypes.InstalledIntegration, *model.ApiError) {
integrations := []integrationstypes.InstalledIntegration{}
err := r.store.BunDB().NewSelect().
Model(&integrations).
@@ -44,8 +45,8 @@ func (r *InstalledIntegrationsSqliteRepo) list(
func (r *InstalledIntegrationsSqliteRepo) get(
ctx context.Context, orgId string, integrationTypes []string,
) (map[string]integrationstypes.InstalledIntegration, *model.ApiError) {
integrations := []integrationstypes.InstalledIntegration{}
typeValues := []interface{}{}
for _, integrationType := range integrationTypes {
@@ -62,7 +63,7 @@ func (r *InstalledIntegrationsSqliteRepo) get(
))
}
result := map[string]integrationstypes.InstalledIntegration{}
for _, ii := range integrations {
result[ii.Type] = ii
}
@@ -74,10 +75,10 @@ func (r *InstalledIntegrationsSqliteRepo) upsert(
ctx context.Context,
orgId string,
integrationType string,
config integrationstypes.InstalledIntegrationConfig,
) (*integrationstypes.InstalledIntegration, *model.ApiError) {
integration := integrationstypes.InstalledIntegration{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
@@ -114,7 +115,7 @@ func (r *InstalledIntegrationsSqliteRepo) delete(
ctx context.Context, orgId string, integrationType string,
) *model.ApiError {
_, dbErr := r.store.BunDB().NewDelete().
Model(&integrationstypes.InstalledIntegration{}).
Where("type = ?", integrationType).
Where("org_id = ?", orgId).
Exec(ctx)

View File

@@ -26,7 +26,6 @@ import (
querierAPI "github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
@@ -71,11 +70,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
return nil, err
}
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
if err != nil {
return nil, err
}
cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
Provider: "memory",
Memory: cache.Memory{
@@ -127,7 +121,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
Reader: reader,
RuleManager: rm,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
FluxInterval: config.Querier.FluxInterval,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
@@ -135,6 +128,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
Signoz: signoz,
QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
Logger: signoz.Instrumentation.Logger(),
})
if err != nil {
return nil, err

View File

@@ -1,247 +0,0 @@
package types
import (
"database/sql/driver"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/uptrace/bun"
)
type IntegrationUserEmail string
const (
AWSIntegrationUserEmail IntegrationUserEmail = "aws-integration@signoz.io"
)
var AllIntegrationUserEmails = []IntegrationUserEmail{
AWSIntegrationUserEmail,
}
// --------------------------------------------------------------------------
// Normal integration uses just the installed_integration table
// --------------------------------------------------------------------------
type InstalledIntegration struct {
bun.BaseModel `bun:"table:installed_integration"`
Identifiable
Type string `json:"type" bun:"type,type:text,unique:org_id_type"`
Config InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
OrgID string `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
}
type InstalledIntegrationConfig map[string]interface{}
// For serializing from db
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, c)
}
// For serializing to db
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
filterSetJson, err := json.Marshal(c)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not serialize integration config to JSON")
}
return filterSetJson, nil
}
// --------------------------------------------------------------------------
// Cloud integration uses the cloud_integration table
// and cloud_integrations_service table
// --------------------------------------------------------------------------
type CloudIntegration struct {
bun.BaseModel `bun:"table:cloud_integration"`
Identifiable
TimeAuditable
Provider string `json:"provider" bun:"provider,type:text,unique:provider_id"`
Config *AccountConfig `json:"config" bun:"config,type:text"`
AccountID *string `json:"account_id" bun:"account_id,type:text"`
LastAgentReport *AgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
RemovedAt *time.Time `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
OrgID string `bun:"org_id,type:text,unique:provider_id"`
}
func (a *CloudIntegration) Status() AccountStatus {
status := AccountStatus{}
if a.LastAgentReport != nil {
lastHeartbeat := a.LastAgentReport.TimestampMillis
status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
}
return status
}
func (a *CloudIntegration) Account() Account {
ca := Account{Id: a.ID.StringValue(), Status: a.Status()}
if a.AccountID != nil {
ca.CloudAccountId = *a.AccountID
}
if a.Config != nil {
ca.Config = *a.Config
} else {
ca.Config = DefaultAccountConfig()
}
return ca
}
type Account struct {
Id string `json:"id"`
CloudAccountId string `json:"cloud_account_id"`
Config AccountConfig `json:"config"`
Status AccountStatus `json:"status"`
}
type AccountStatus struct {
Integration AccountIntegrationStatus `json:"integration"`
}
type AccountIntegrationStatus struct {
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}
func DefaultAccountConfig() AccountConfig {
return AccountConfig{
EnabledRegions: []string{},
}
}
type AccountConfig struct {
EnabledRegions []string `json:"regions"`
}
// For serializing from db
func (c *AccountConfig) Scan(src any) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, c)
}
// For serializing to db
func (c *AccountConfig) Value() (driver.Value, error) {
if c == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "cloud account config is nil")
}
serialized, err := json.Marshal(c)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't serialize cloud account config to JSON")
}
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
return string(serialized), nil
}
type AgentReport struct {
TimestampMillis int64 `json:"timestamp_millis"`
Data map[string]any `json:"data"`
}
// For serializing from db
func (r *AgentReport) Scan(src any) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, r)
}
// For serializing to db
func (r *AgentReport) Value() (driver.Value, error) {
if r == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
}
serialized, err := json.Marshal(r)
if err != nil {
return nil, errors.WrapInternalf(
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
)
}
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
return string(serialized), nil
}
type CloudIntegrationService struct {
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
Identifiable
TimeAuditable
Type string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
Config CloudServiceConfig `bun:"config,type:text"`
CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
}
type CloudServiceLogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
type CloudServiceMetricsConfig struct {
Enabled bool `json:"enabled"`
}
type CloudServiceConfig struct {
Logs *CloudServiceLogsConfig `json:"logs,omitempty"`
Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
}
// For serializing from db
func (c *CloudServiceConfig) Scan(src any) error {
var data []byte
switch src := src.(type) {
case []byte:
data = src
case string:
data = []byte(src)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, c)
}
// For serializing to db
func (c *CloudServiceConfig) Value() (driver.Value, error) {
if c == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "cloud service config is nil")
}
serialized, err := json.Marshal(c)
if err != nil {
return nil, errors.WrapInternalf(
err, errors.CodeInternal, "couldn't serialize cloud service config to JSON",
)
}
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
return string(serialized), nil
}

View File

@@ -0,0 +1,497 @@
package integrationstypes
import (
"context"
"database/sql/driver"
"encoding/json"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
// Generic utility functions for JSON serialization/deserialization
// UnmarshalJSON is a generic function to unmarshal JSON data into any type
func UnmarshalJSON[T any](src []byte, target *T) error {
err := json.Unmarshal(src, target)
if err != nil {
return errors.WrapInternalf(
err, errors.CodeInternal, "couldn't deserialize JSON",
)
}
return nil
}
// MarshalJSON is a generic function to marshal any type to JSON
func MarshalJSON[T any](source *T) ([]byte, error) {
if source == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "source is nil")
}
serialized, err := json.Marshal(source)
if err != nil {
return nil, errors.WrapInternalf(
err, errors.CodeInternal, "couldn't serialize to JSON",
)
}
return serialized, nil
}
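As a quick illustration (not part of the change), these helpers round-trip any JSON-serializable type; AWSAccountConfig is defined later in this file:

// Round-trip sketch using the generic helpers above.
func exampleJSONRoundTrip() (*AWSAccountConfig, error) {
	cfg := AWSAccountConfig{EnabledRegions: []string{"us-east-1"}}
	raw, err := MarshalJSON(&cfg) // wraps json.Marshal with internal error wrapping
	if err != nil {
		return nil, err
	}
	decoded := new(AWSAccountConfig)
	if err := UnmarshalJSON(raw, decoded); err != nil { // wraps json.Unmarshal the same way
		return nil, err
	}
	return decoded, nil
}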
// CloudProvider defines the interface to be implemented by different cloud providers.
// This is a generic interface, so it accepts and returns generic types instead of concrete ones.
// It is the cloud provider's responsibility to cast them to the appropriate types and validate them.
type CloudProvider interface {
GetName() CloudProviderType
AgentCheckIn(ctx context.Context, req *PostableAgentCheckInPayload) (any, error)
GenerateConnectionArtifact(ctx context.Context, req *PostableConnectionArtifact) (any, error)
GetAccountStatus(ctx context.Context, orgID, accountID string) (*GettableAccountStatus, error)
ListServices(ctx context.Context, orgID string, accountID *string) (any, error) // returns either GettableAWSServices or GettableAzureServices
GetServiceDetails(ctx context.Context, req *GetServiceDetailsReq) (any, error)
ListConnectedAccounts(ctx context.Context, orgID string) (*GettableConnectedAccountsList, error)
GetDashboard(ctx context.Context, req *GettableDashboard) (*dashboardtypes.Dashboard, error)
GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
UpdateAccountConfig(ctx context.Context, req *PatchableAccountConfig) (any, error) // req can be either PatchableAWSAccountConfig or PatchableAzureAccountConfig
UpdateServiceConfig(ctx context.Context, req *PatchableServiceConfig) (any, error)
DisconnectAccount(ctx context.Context, orgID, accountID string) (*CloudIntegration, error)
}
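A hedged sketch of the dispatch pattern the HTTP handlers build on top of this interface; the registry variable and helper shape here are illustrative, the real wiring lives in the API layer:

// Illustrative registry: one implementation per provider, keyed by name.
var registry = map[CloudProviderType]CloudProvider{}

// dispatchDisconnect parses a provider name (e.g. from a URL variable)
// and delegates to the matching implementation.
func dispatchDisconnect(ctx context.Context, providerName, orgID, accountID string) (*CloudIntegration, error) {
	provider, err := NewCloudProvider(providerName) // defined elsewhere in this package
	if err != nil {
		return nil, err
	}
	return registry[provider].DisconnectAccount(ctx, orgID, accountID)
}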
type GettableDashboard struct {
ID string
OrgID valuer.UUID
}
type GettableCloudIntegrationConnectionParams struct {
IngestionUrl string `json:"ingestion_url,omitempty"`
IngestionKey string `json:"ingestion_key,omitempty"`
SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}
type GettableIngestionKey struct {
Name string `json:"name"`
Value string `json:"value"`
// other attributes from the gateway response are not included here since they are not used.
}
type GettableIngestionKeysSearch struct {
Status string `json:"status"`
Data []GettableIngestionKey `json:"data"`
Error string `json:"error"`
}
type GettableCreateIngestionKey struct {
Status string `json:"status"`
Data GettableIngestionKey `json:"data"`
Error string `json:"error"`
}
type GettableDeployment struct {
Name string `json:"name"`
ClusterInfo struct {
Region struct {
DNS string `json:"dns"`
} `json:"region"`
} `json:"cluster"`
}
type GettableConnectedAccountsList struct {
Accounts []*Account `json:"accounts"`
}
// SigNozAWSAgentConfig represents requirements for agent deployment in user's AWS account
type SigNozAWSAgentConfig struct {
// The region in which SigNoz agent should be installed.
Region string `json:"region"`
IngestionUrl string `json:"ingestion_url"`
IngestionKey string `json:"ingestion_key"`
SigNozAPIUrl string `json:"signoz_api_url"`
SigNozAPIKey string `json:"signoz_api_key"`
Version string `json:"version,omitempty"`
}
type PostableConnectionArtifact struct {
OrgID string
Data []byte // either PostableAWSConnectionUrl or PostableAzureConnectionCommand
}
type PostableConnectionArtifactTyped[AgentConfigT any, AccountConfigT any] struct {
AccountId *string `json:"account_id,omitempty"` // Optional. To be specified for updates.
AgentConfig *AgentConfigT `json:"agent_config"`
AccountConfig *AccountConfigT `json:"account_config"`
}
type PostableAWSConnectionUrl = PostableConnectionArtifactTyped[SigNozAWSAgentConfig, AWSAccountConfig]
type PostableAzureConnectionCommand = PostableConnectionArtifactTyped[SigNozAzureAgentConfig, AzureAccountConfig]
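Since PostableConnectionArtifact carries raw bytes, each provider is expected to decode Data into its typed alias; a minimal sketch under that assumption (the helper name is illustrative):

// decodeAWSConnectionUrl recovers the typed AWS payload from the
// generic artifact wrapper.
func decodeAWSConnectionUrl(req *PostableConnectionArtifact) (*PostableAWSConnectionUrl, error) {
	typed := new(PostableAWSConnectionUrl)
	if err := UnmarshalJSON(req.Data, typed); err != nil {
		return nil, err
	}
	return typed, nil
}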
type SigNozAzureAgentConfig struct {
IngestionUrl string `json:"ingestion_url"`
IngestionKey string `json:"ingestion_key"`
SigNozAPIUrl string `json:"signoz_api_url"`
SigNozAPIKey string `json:"signoz_api_key"`
Version string `json:"version,omitempty"`
}
// GettableConnectionArtifact represents the base structure for connection artifacts
type GettableConnectionArtifact[T any] struct {
AccountId string `json:"account_id"`
Artifact T `json:",inline"`
}
type GettableAWSConnectionArtifact struct {
ConnectionUrl string `json:"connection_url"`
}
type GettableAzureConnectionArtifact struct {
AzureShellConnectionCommand string `json:"az_shell_connection_command"`
AzureCliConnectionCommand string `json:"az_cli_connection_command"`
}
type GettableAWSConnectionUrl struct {
AccountId string `json:"account_id"`
ConnectionUrl string `json:"connection_url"`
}
type GettableAzureConnectionCommand struct {
AccountId string `json:"account_id"`
AzureShellConnectionCommand string `json:"az_shell_connection_command"`
AzureCliConnectionCommand string `json:"az_cli_connection_command"`
}
type GettableAccountStatus struct {
Id string `json:"id"`
CloudAccountId *string `json:"cloud_account_id,omitempty"`
Status AccountStatus `json:"status"`
}
type PostableAgentCheckInPayload struct {
ID string `json:"account_id"`
AccountID string `json:"cloud_account_id"`
// Arbitrary cloud specific Agent data
Data map[string]any `json:"data,omitempty"`
OrgID string `json:"-"`
}
type AWSAgentIntegrationConfig struct {
EnabledRegions []string `json:"enabled_regions"`
TelemetryCollectionStrategy *AWSCollectionStrategy `json:"telemetry,omitempty"`
}
type AzureAgentIntegrationConfig struct {
DeploymentRegion string `json:"deployment_region"` // will not be changed once set
EnabledResourceGroups []string `json:"resource_groups"`
// TelemetryCollectionStrategy is a map of service to telemetry config
TelemetryCollectionStrategy map[string]*AzureCollectionStrategy `json:"telemetry,omitempty"`
}
type GettableAgentCheckIn[T any] struct {
AccountId string `json:"account_id"`
CloudAccountId string `json:"cloud_account_id"`
RemovedAt *time.Time `json:"removed_at"`
IntegrationConfig T `json:"integration_config"`
}
type GettableAWSAgentCheckIn = GettableAgentCheckIn[AWSAgentIntegrationConfig]
type GettableAzureAgentCheckIn = GettableAgentCheckIn[AzureAgentIntegrationConfig]
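As an example of how a provider could shape its check-in response with these aliases (all field values below are placeholders, not real data):

// Illustrative response construction; values are made up.
func exampleAWSCheckInResponse() GettableAWSAgentCheckIn {
	return GettableAWSAgentCheckIn{
		AccountId:      "internal-account-uuid", // placeholder
		CloudAccountId: "123456789012",          // placeholder AWS account id
		IntegrationConfig: AWSAgentIntegrationConfig{
			EnabledRegions: []string{"us-east-1"},
		},
	}
}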
type PatchableServiceConfig struct {
OrgID string `json:"org_id"`
ServiceId string `json:"service_id"`
Config []byte `json:"config"` // json serialized config
}
type PatchableCloudServiceConfig[T any] struct {
CloudAccountId string `json:"cloud_account_id"`
Config *T `json:"config"`
}
type PatchableAWSCloudServiceConfig = PatchableCloudServiceConfig[AWSCloudServiceConfig]
type PatchableAzureCloudServiceConfig = PatchableCloudServiceConfig[AzureCloudServiceConfig]
type AWSCloudServiceConfig struct {
Logs *AWSCloudServiceLogsConfig `json:"logs,omitempty"`
Metrics *AWSCloudServiceMetricsConfig `json:"metrics,omitempty"`
}
type AzureCloudServiceConfig struct {
Logs []*AzureCloudServiceLogsConfig `json:"logs,omitempty"`
Metrics []*AzureCloudServiceMetricsConfig `json:"metrics,omitempty"`
}
func (a *AWSCloudServiceConfig) Validate(def *AWSDefinition) error {
if def.Id != S3Sync && a.Logs != nil && a.Logs.S3Buckets != nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", S3Sync)
} else if def.Id == S3Sync && a.Logs != nil && a.Logs.S3Buckets != nil {
for region := range a.Logs.S3Buckets {
if _, found := ValidAWSRegions[region]; !found {
return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
}
}
}
return nil
}
func (a *AzureCloudServiceConfig) Validate(def *AzureDefinition) error {
logsMap := make(map[string]bool)
metricsMap := make(map[string]bool)
if def.Strategy != nil && def.Strategy.Logs != nil {
for _, log := range def.Strategy.Logs {
logsMap[log.Name] = true
}
}
if def.Strategy != nil && def.Strategy.Metrics != nil {
for _, metric := range def.Strategy.Metrics {
metricsMap[metric.Name] = true
}
}
for _, log := range a.Logs {
if _, found := logsMap[log.Name]; !found {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid log name: %s", log.Name)
}
}
for _, metric := range a.Metrics {
if _, found := metricsMap[metric.Name]; !found {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid metric name: %s", metric.Name)
}
}
return nil
}
type AzureCloudServiceLogsConfig struct {
Enabled bool `json:"enabled"`
Name string `json:"name"`
}
type AzureCloudServiceMetricsConfig struct {
Enabled bool `json:"enabled"`
Name string `json:"name"`
}
type PatchServiceConfigResponse struct {
ServiceId string `json:"id"`
Config any `json:"config"`
}
type PatchableAccountConfig struct {
OrgID string
AccountId string
Data []byte // can be either AWSAccountConfig or AzureAccountConfig
}
type PatchableAccountConfigTyped[T any] struct {
Config *T `json:"config"`
}
type PatchableAWSAccountConfig = PatchableAccountConfigTyped[AWSAccountConfig]
type PatchableAzureAccountConfig = PatchableAccountConfigTyped[AzureAccountConfig]
type AWSAccountConfig struct {
EnabledRegions []string `json:"regions"`
}
type AzureAccountConfig struct {
DeploymentRegion string `json:"deployment_region,omitempty"`
EnabledResourceGroups []string `json:"resource_groups,omitempty"`
}
type GettableServices[T any] struct {
Services []T `json:"services"`
}
type GettableAWSServices = GettableServices[AWSServiceSummary]
type GettableAzureServices = GettableServices[AzureServiceSummary]
type GetServiceDetailsReq struct {
OrgID valuer.UUID
ServiceId string
CloudAccountID *string
}
// --------------------------------------------------------------------------
// DATABASE TYPES
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// Cloud integration uses the cloud_integration table
// and cloud_integrations_service table
// --------------------------------------------------------------------------
type CloudIntegration struct {
bun.BaseModel `bun:"table:cloud_integration"`
types.Identifiable
types.TimeAuditable
Provider string `json:"provider" bun:"provider,type:text,unique:provider_id"`
Config string `json:"config" bun:"config,type:text"` // json serialized config
AccountID *string `json:"account_id" bun:"account_id,type:text"`
LastAgentReport *AgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
RemovedAt *time.Time `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
OrgID string `bun:"org_id,type:text,unique:provider_id"`
}
func (a *CloudIntegration) Status() AccountStatus {
status := AccountStatus{}
if a.LastAgentReport != nil {
lastHeartbeat := a.LastAgentReport.TimestampMillis
status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
}
return status
}
func (a *CloudIntegration) Account(cloudProvider CloudProviderType) *Account {
ca := &Account{Id: a.ID.StringValue(), Status: a.Status()}
if a.AccountID != nil {
ca.CloudAccountId = *a.AccountID
}
ca.Config = map[string]interface{}{}
if len(a.Config) < 1 {
return ca
}
switch cloudProvider {
case CloudProviderAWS:
config := new(AWSAccountConfig)
_ = UnmarshalJSON([]byte(a.Config), config)
ca.Config = config
case CloudProviderAzure:
config := new(AzureAccountConfig)
_ = UnmarshalJSON([]byte(a.Config), config)
ca.Config = config
default:
}
return ca
}
type Account struct {
Id string `json:"id"`
CloudAccountId string `json:"cloud_account_id"`
Config any `json:"config"` // AWSAccountConfig or AzureAccountConfig
Status AccountStatus `json:"status"`
}
type AccountStatus struct {
Integration AccountIntegrationStatus `json:"integration"`
}
type AccountIntegrationStatus struct {
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}
func DefaultAWSAccountConfig() AWSAccountConfig {
return AWSAccountConfig{
EnabledRegions: []string{},
}
}
func DefaultAzureAccountConfig() AzureAccountConfig {
return AzureAccountConfig{
DeploymentRegion: "",
EnabledResourceGroups: []string{},
}
}
type ServiceSummary[T any] struct {
DefinitionMetadata
Config *T `json:"config"`
}
type AWSServiceSummary = ServiceSummary[AWSCloudServiceConfig]
type AzureServiceSummary = ServiceSummary[AzureCloudServiceConfig]
type GettableAWSServiceDetails struct {
AWSDefinition
Config *AWSCloudServiceConfig `json:"config"`
ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}
type GettableAzureServiceDetails struct {
AzureDefinition
Config *AzureCloudServiceConfig `json:"config"`
ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}
type ServiceConnectionStatus struct {
Logs []*SignalConnectionStatus `json:"logs"`
Metrics []*SignalConnectionStatus `json:"metrics"`
}
type SignalConnectionStatus struct {
CategoryID string `json:"category"`
CategoryDisplayName string `json:"category_display_name"`
LastReceivedTsMillis int64 `json:"last_received_ts_ms"` // epoch milliseconds
LastReceivedFrom string `json:"last_received_from"` // resource identifier
}
type AgentReport struct {
TimestampMillis int64 `json:"timestamp_millis"`
Data map[string]any `json:"data"`
}
// Scan scans data from db
func (r *AgentReport) Scan(src any) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, r)
}
// Value serializes data to bytes for db insertion
func (r *AgentReport) Value() (driver.Value, error) {
if r == nil {
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
}
serialized, err := json.Marshal(r)
if err != nil {
return nil, errors.WrapInternalf(
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
)
}
return serialized, nil
}
type CloudIntegrationService struct {
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
types.Identifiable
types.TimeAuditable
Type string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
Config string `bun:"config,type:text"` // json serialized config
CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
}
type AWSCloudServiceLogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
type AWSCloudServiceMetricsConfig struct {
Enabled bool `json:"enabled"`
}

View File

@@ -0,0 +1,199 @@
package integrationstypes
import (
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
const (
S3Sync = "s3sync"
)
type AWSDefinition = ServiceDefinition[AWSCollectionStrategy]
type AzureDefinition = ServiceDefinition[AzureCollectionStrategy]
var _ Definition = &AWSDefinition{}
var _ Definition = &AzureDefinition{}
type ServiceDefinition[T any] struct {
DefinitionMetadata
Overview string `json:"overview"` // markdown
Assets Assets `json:"assets"`
SupportedSignals SupportedSignals `json:"supported_signals"`
DataCollected DataCollected `json:"data_collected"`
IngestionStatusCheck *IngestionStatusCheck `json:"ingestion_status_check,omitempty"`
Strategy *T `json:"telemetry_collection_strategy"`
}
func (def *ServiceDefinition[T]) PopulateDashboardURLs(cloudProvider CloudProviderType, svcId string) {
for i := range def.Assets.Dashboards {
dashboardId := def.Assets.Dashboards[i].Id
url := "/dashboard/" + GetCloudIntegrationDashboardID(cloudProvider, svcId, dashboardId)
def.Assets.Dashboards[i].Url = url
}
}
func (def *ServiceDefinition[T]) GetId() string {
return def.Id
}
func (def *ServiceDefinition[T]) Validate() error {
seenDashboardIds := map[string]interface{}{}
if def.Strategy == nil {
return errors.NewInternalf(errors.CodeInternal, "telemetry_collection_strategy is required")
}
for _, dd := range def.Assets.Dashboards {
if _, seen := seenDashboardIds[dd.Id]; seen {
return errors.NewInternalf(errors.CodeInternal, "multiple dashboards found with id %s for Azure Integration", dd.Id)
}
seenDashboardIds[dd.Id] = nil
}
return nil
}
type DefinitionMetadata struct {
Id string `json:"id"`
Title string `json:"title"`
Icon string `json:"icon"`
}
type Definition interface {
GetId() string
Validate() error
PopulateDashboardURLs(cloudProvider CloudProviderType, svcId string)
}
type IngestionStatusCheck struct {
Metrics []*IngestionStatusCheckCategory `json:"metrics"`
Logs []*IngestionStatusCheckCategory `json:"logs"`
}
type IngestionStatusCheckCategory struct {
Category string `json:"category"`
DisplayName string `json:"display_name"`
Checks []*IngestionStatusCheckAttribute `json:"checks"`
}
type IngestionStatusCheckAttribute struct {
Key string `json:"key"` // search key (metric name or log message)
Attributes []*IngestionStatusCheckAttributeFilter `json:"attributes"`
}
type IngestionStatusCheckAttributeFilter struct {
Name string `json:"name"`
Operator string `json:"operator"`
Value string `json:"value"`
}
type Assets struct {
Dashboards []Dashboard `json:"dashboards"`
}
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
type CollectedLogAttribute struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
type CollectedMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Unit string `json:"unit"`
Description string `json:"description"`
}
type AWSCollectionStrategy struct {
Metrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
Logs *AWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}
type AzureCollectionStrategy struct {
Metrics []*AzureMetricsStrategy `json:"azure_metrics,omitempty"`
Logs []*AzureLogsStrategy `json:"azure_logs,omitempty"`
}
type AWSMetricsStrategy struct {
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
StreamFilters []struct {
// json tags here are in the shape expected by AWS API as detailed at
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
}
type AWSLogsStrategy struct {
Subscriptions []struct {
// subscribe to all logs groups with specified prefix.
// eg: `/aws/rds/`
LogGroupNamePrefix string `json:"log_group_name_prefix"`
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
// "" implies no filtering is required.
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
}
type AzureMetricsStrategy struct {
CategoryType string `json:"category_type"`
Name string `json:"name"`
}
type AzureLogsStrategy struct {
CategoryType string `json:"category_type"`
Name string `json:"name"`
}
type Dashboard struct {
Id string `json:"id"`
Url string `json:"url"`
Title string `json:"title"`
Description string `json:"description"`
Image string `json:"image"`
Definition *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
func GetCloudIntegrationDashboardID(cloudProvider valuer.String, svcId, dashboardId string) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
func GetDashboardsFromAssets(svcId string, cloudProvider CloudProviderType, createdAt *time.Time, assets Assets) []*dashboardtypes.Dashboard {
dashboards := make([]*dashboardtypes.Dashboard, 0)
for _, d := range assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
dashboards = append(dashboards, &dashboardtypes.Dashboard{
ID: GetCloudIntegrationDashboardID(cloudProvider, svcId, d.Id),
Locked: true,
Data: *d.Definition,
TimeAuditable: types.TimeAuditable{
CreatedAt: *createdAt,
UpdatedAt: *createdAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: author,
UpdatedBy: author,
},
})
}
return dashboards
}

View File

@@ -1,4 +1,4 @@
package cloudintegrations
package integrationstypes
import (
"github.com/SigNoz/signoz/pkg/errors"
@@ -41,3 +41,62 @@ var ValidAWSRegions = map[string]bool{
"us-west-1": true, // US West (N. California).
"us-west-2": true, // US West (Oregon).
}
var ValidAzureRegions = map[string]bool{
"australiacentral": true,
"australiacentral2": true,
"australiaeast": true,
"australiasoutheast": true,
"austriaeast": true,
"belgiumcentral": true,
"brazilsouth": true,
"brazilsoutheast": true,
"canadacentral": true,
"canadaeast": true,
"centralindia": true,
"centralus": true,
"chilecentral": true,
"denmarkeast": true,
"eastasia": true,
"eastus": true,
"eastus2": true,
"francecentral": true,
"francesouth": true,
"germanynorth": true,
"germanywestcentral": true,
"indonesiacentral": true,
"israelcentral": true,
"italynorth": true,
"japaneast": true,
"japanwest": true,
"koreacentral": true,
"koreasouth": true,
"malaysiawest": true,
"mexicocentral": true,
"newzealandnorth": true,
"northcentralus": true,
"northeurope": true,
"norwayeast": true,
"norwaywest": true,
"polandcentral": true,
"qatarcentral": true,
"southafricanorth": true,
"southafricawest": true,
"southcentralus": true,
"southindia": true,
"southeastasia": true,
"spaincentral": true,
"swedencentral": true,
"switzerlandnorth": true,
"switzerlandwest": true,
"uaecentral": true,
"uaenorth": true,
"uksouth": true,
"ukwest": true,
"westcentralus": true,
"westeurope": true,
"westindia": true,
"westus": true,
"westus2": true,
"westus3": true,
}

View File

@@ -0,0 +1,109 @@
package integrationstypes
import (
"database/sql/driver"
"encoding/json"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
// CloudProviderType is a type alias for valuer.String
type CloudProviderType = valuer.String
var (
CloudProviderAWS = valuer.NewString("aws")
CloudProviderAzure = valuer.NewString("azure")
)
var (
CodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")
)
func NewCloudProvider(provider string) (CloudProviderType, error) {
switch provider {
case CloudProviderAWS.String(), CloudProviderAzure.String():
return valuer.NewString(provider), nil
default:
return CloudProviderType{}, errors.NewInvalidInputf(CodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
}
}
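For illustration, the constructor admits only the two registered provider names:

// Illustrative: valid and invalid inputs.
func exampleNewCloudProvider() {
	p, err := NewCloudProvider("aws")  // p == CloudProviderAWS, err == nil
	_, err2 := NewCloudProvider("gcp") // rejected with CodeCloudProviderInvalidInput
	_, _, _ = p, err, err2
}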
var (
AWSIntegrationUserEmail = valuer.MustNewEmail("aws-integration@signoz.io")
AzureIntegrationUserEmail = valuer.MustNewEmail("azure-integration@signoz.io")
)
var IntegrationUserEmails = []valuer.Email{
AWSIntegrationUserEmail,
AzureIntegrationUserEmail,
}
func IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
parts := strings.SplitN(dashboardUuid, "--", 4)
if len(parts) != 4 {
return false
}
return parts[0] == "cloud-integration"
}
func GetCloudProviderFromDashboardID(dashboardUuid string) (CloudProviderType, error) {
parts := strings.SplitN(dashboardUuid, "--", 4)
if len(parts) != 4 {
return valuer.String{}, errors.NewInvalidInputf(CodeCloudProviderInvalidInput, "invalid dashboard uuid: %s", dashboardUuid)
}
providerStr := parts[1]
cloudProvider, err := NewCloudProvider(providerStr)
if err != nil {
return CloudProviderType{}, err
}
return cloudProvider, nil
}
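These parsers assume the cloud-integration--provider--service--dashboard format produced by GetCloudIntegrationDashboardID (defined in the definitions file of this package); a round-trip sketch:

func exampleDashboardIDRoundTrip() {
	id := GetCloudIntegrationDashboardID(CloudProviderAWS, "rds", "overview")
	// id == "cloud-integration--aws--rds--overview"
	ok := IsCloudIntegrationDashboardUuid(id)          // true
	provider, _ := GetCloudProviderFromDashboardID(id) // CloudProviderAWS
	_, _ = ok, provider
}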
// --------------------------------------------------------------------------
// Normal integration uses just the installed_integration table
// --------------------------------------------------------------------------
type InstalledIntegration struct {
bun.BaseModel `bun:"table:installed_integration"`
types.Identifiable
Type string `json:"type" bun:"type,type:text,unique:org_id_type"`
Config InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
OrgID string `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
}
type InstalledIntegrationConfig map[string]interface{}
// Scan scans data from db
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
var data []byte
switch v := src.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
}
return json.Unmarshal(data, c)
}
// Value serializes data to db
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
filterSetJson, err := json.Marshal(c)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not serialize integration config to JSON")
}
return filterSetJson, nil
}
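A short sketch of the database round trip these two methods enable (bun calls Value on write and Scan on read):

func exampleConfigRoundTrip() (InstalledIntegrationConfig, error) {
	cfg := InstalledIntegrationConfig{"enabled": true}
	v, err := cfg.Value() // JSON for the db driver
	if err != nil {
		return nil, err
	}
	out := InstalledIntegrationConfig{}
	if err := out.Scan(v); err != nil { // back to the map form
		return nil, err
	}
	return out, nil
}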

View File

@@ -112,7 +112,7 @@ def verify_webhook_alert_expectation(
break
# wait for some time before checking again
time.sleep(10)
# We've waited but we didn't get the expected number of alerts
@@ -133,15 +133,3 @@ def verify_webhook_alert_expectation(
)
return True # should not reach here
def update_rule_channel_name(rule_data: dict, channel_name: str):
"""
updates the channel name in the thresholds
so alert notifications are sent to the given channel
"""
thresholds = rule_data["condition"]["thresholds"]
if "kind" in thresholds and thresholds["kind"] == "basic":
# loop over all the specs and update the channels
for spec in thresholds["spec"]:
spec["channels"] = [channel_name]

View File

@@ -1,18 +1,16 @@
"""Fixtures for cloud integration tests."""
from http import HTTPStatus
from typing import Callable, Optional
import pytest
import requests
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.logger import setup_logger
logger = setup_logger(__name__)
@pytest.fixture(scope="function")
def create_cloud_integration_account(
request: pytest.FixtureRequest,
@@ -26,7 +24,9 @@ def create_cloud_integration_account(
cloud_provider: str = "aws",
) -> dict:
nonlocal created_account_id, cloud_provider_used
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
)
request_payload = {
"account_config": {"regions": ["us-east-1"]},
@@ -59,7 +59,9 @@ def create_cloud_integration_account(
def _disconnect(admin_token: str, cloud_provider: str) -> requests.Response:
assert created_account_id
disconnect_endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{created_account_id}/disconnect"
)
return requests.post(
signoz.self.host_configs["8080"].get(disconnect_endpoint),
headers={"Authorization": f"Bearer {admin_token}"},

View File

@@ -1,5 +1,4 @@
"""Fixtures for cloud integration tests."""
from http import HTTPStatus
import requests

View File

@@ -43,10 +43,6 @@ class MetricsTimeSeries(ABC):
resource_attrs: dict[str, str] = {},
scope_attrs: dict[str, str] = {},
) -> None:
# Create a copy of labels to avoid mutating the caller's dictionary
labels = dict(labels)
# Add metric_name to the labels to support promql queries
labels["__name__"] = metric_name
self.env = env
self.metric_name = metric_name
self.temporality = temporality

View File

@@ -69,10 +69,6 @@ def signoz( # pylint: disable=too-many-arguments,too-many-positional-arguments
"SIGNOZ_GLOBAL_INGESTION__URL": "https://ingest.test.signoz.cloud",
"SIGNOZ_USER_PASSWORD_RESET_ALLOW__SELF": True,
"SIGNOZ_USER_PASSWORD_RESET_MAX__TOKEN__LIFETIME": "6h",
"RULES_EVAL_DELAY": "0s",
"SIGNOZ_ALERTMANAGER_SIGNOZ_POLL__INTERVAL": "5s",
"SIGNOZ_ALERTMANAGER_SIGNOZ_ROUTE_GROUP__WAIT": "1s",
"SIGNOZ_ALERTMANAGER_SIGNOZ_ROUTE_GROUP__INTERVAL": "5s",
}
| sqlstore.env
| clickhouse.env

View File

@@ -191,15 +191,3 @@ class AlertExpectation:
# seconds to wait for the alerts to be fired, if no
# alerts are fired in the expected time, the test will fail
wait_time_seconds: int
@dataclass(frozen=True)
class AlertTestCase:
# name of the test case
name: str
# path to the rule file in testdata directory
rule_path: str
# list of alert data that will be inserted into the database
alert_data: List[AlertData]
# list of alert expectations for the test case
alert_expectation: AlertExpectation

View File

@@ -8,9 +8,6 @@ from wiremock.client import HttpMethods, Mapping, MappingRequest, MappingRespons
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.logger import setup_logger
logger = setup_logger(__name__)
def test_webhook_notification_channel(
@@ -23,7 +20,6 @@ def test_webhook_notification_channel(
"""
Tests the creation and delivery of test alerts on the created notification channel
"""
logger.info("Setting up notification channel")
# Prepare notification channel name and webhook endpoint
notification_channel_name = f"notification-channel-{uuid.uuid4()}"
@@ -59,10 +55,10 @@ def test_webhook_notification_channel(
)
# TODO: @abhishekhugetech # pylint: disable=W0511
# Time required for Org to be registered
# in the alertmanager, default 1m.
# this will be fixed after [https://github.com/SigNoz/engineering-pod/issues/3800]
time.sleep(65)
# Call test API for the notification channel
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

View File

@@ -1,671 +0,0 @@
import json
import uuid
from datetime import datetime, timedelta, timezone
from typing import Callable, List
import pytest
from wiremock.client import HttpMethods, Mapping, MappingRequest, MappingResponse
from fixtures import types
from fixtures.alertutils import (
update_rule_channel_name,
verify_webhook_alert_expectation,
)
from fixtures.logger import setup_logger
from fixtures.utils import get_testdata_file_path
# test cases for match type and compare operators have a wait time of 30 seconds to verify the alert expectation.
# we've positioned the alert data to fire the alert on the first eval of the rule manager; the eval frequency
# for most alert rules is set to 15s, so considering this delay plus some delay from the alert manager's
# group_wait and group_interval, even in the worst case most alerts should be triggered in about 30 seconds
TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS = [
types.AlertTestCase(
name="test_threshold_above_at_least_once",
rule_path="alerts/test_scenarios/threshold_above_at_least_once/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_above_at_least_once/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_above_at_least_once",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_above_all_the_time",
rule_path="alerts/test_scenarios/threshold_above_all_the_time/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_above_all_the_time/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_above_all_the_time",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_above_in_total",
rule_path="alerts/test_scenarios/threshold_above_in_total/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_above_in_total/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_above_in_total",
"threshold.name": "critical",
"service": "server",
},
),
types.FiringAlert(
labels={
"alertname": "threshold_above_in_total",
"threshold.name": "critical",
"service": "api",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_above_average",
rule_path="alerts/test_scenarios/threshold_above_average/rule.json",
alert_data=[
types.AlertData(
type="traces",
data_path="alerts/test_scenarios/threshold_above_average/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_above_average",
"threshold.name": "critical",
}
),
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last, pylint: disable=W0511
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed
# types.AlertTestCase(
# name="test_threshold_above_last",
# rule_path="alerts/test_scenarios/threshold_above_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_above_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_above_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_below_at_least_once",
rule_path="alerts/test_scenarios/threshold_below_at_least_once/rule.json",
alert_data=[
types.AlertData(
type="logs",
data_path="alerts/test_scenarios/threshold_below_at_least_once/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_below_at_least_once",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_below_all_the_time",
rule_path="alerts/test_scenarios/threshold_below_all_the_time/rule.json",
alert_data=[
types.AlertData(
type="logs",
data_path="alerts/test_scenarios/threshold_below_all_the_time/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_below_all_the_time",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_below_in_total",
rule_path="alerts/test_scenarios/threshold_below_in_total/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_below_in_total/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_below_in_total",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_below_average",
rule_path="alerts/test_scenarios/threshold_below_average/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_below_average/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_below_average",
"threshold.name": "critical",
}
),
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last,
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
# types.AlertTestCase(
# name="test_threshold_below_last",
# rule_path="alerts/test_scenarios/threshold_below_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_below_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_below_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_equal_to_at_least_once",
rule_path="alerts/test_scenarios/threshold_equal_to_at_least_once/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_equal_to_at_least_once/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_equal_to_at_least_once",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_equal_to_all_the_time",
rule_path="alerts/test_scenarios/threshold_equal_to_all_the_time/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_equal_to_all_the_time/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_equal_to_all_the_time",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_equal_to_in_total",
rule_path="alerts/test_scenarios/threshold_equal_to_in_total/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_equal_to_in_total/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_equal_to_in_total",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_equal_to_average",
rule_path="alerts/test_scenarios/threshold_equal_to_average/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_equal_to_average/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_equal_to_average",
"threshold.name": "critical",
}
),
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last,
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
# types.AlertTestCase(
# name="test_threshold_equal_to_last",
# rule_path="alerts/test_scenarios/threshold_equal_to_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_equal_to_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_equal_to_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
types.AlertTestCase(
name="test_threshold_not_equal_to_at_least_once",
rule_path="alerts/test_scenarios/threshold_not_equal_to_at_least_once/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_not_equal_to_at_least_once/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_not_equal_to_at_least_once",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_not_equal_to_all_the_time",
rule_path="alerts/test_scenarios/threshold_not_equal_to_all_the_time/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_not_equal_to_all_the_time/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_not_equal_to_all_the_time",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_not_equal_to_in_total",
rule_path="alerts/test_scenarios/threshold_not_equal_to_in_total/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_not_equal_to_in_total/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_not_equal_to_in_total",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_threshold_not_equal_to_average",
rule_path="alerts/test_scenarios/threshold_not_equal_to_average/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/threshold_not_equal_to_average/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "threshold_not_equal_to_average",
"threshold.name": "critical",
}
),
],
),
),
# TODO: @abhishekhugetech enable the test for matchType last,
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3801) with matchType last is fixed, pylint: disable=W0511
# types.AlertTestCase(
# name="test_threshold_not_equal_to_last",
# rule_path="alerts/test_scenarios/threshold_not_equal_to_last/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/threshold_not_equal_to_last/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
# wait_time_seconds=30,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "threshold_not_equal_to_last",
# "threshold.name": "critical",
# }
# ),
# ],
# ),
# ),
]
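# Hypothetical helper (illustration only, not used by this suite) showing how
# the repetitive single-alert threshold cases above could be constructed;
# cases with extra labels or multiple expected alerts would still be spelled
# out by hand.
def make_threshold_case(scenario: str, signal: str = "metrics") -> types.AlertTestCase:
    base = f"alerts/test_scenarios/{scenario}"
    return types.AlertTestCase(
        name=f"test_{scenario}",
        rule_path=f"{base}/rule.json",
        alert_data=[types.AlertData(type=signal, data_path=f"{base}/alert_data.jsonl")],
        alert_expectation=types.AlertExpectation(
            should_alert=True,
            wait_time_seconds=30,
            expected_alerts=[
                types.FiringAlert(
                    labels={"alertname": scenario, "threshold.name": "critical"}
                )
            ],
        ),
    )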
# test cases for unit conversion
TEST_RULES_UNIT_CONVERSION = [
types.AlertTestCase(
name="test_unit_conversion_bytes_to_mb",
rule_path="alerts/test_scenarios/unit_conversion_bytes_to_mb/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/unit_conversion_bytes_to_mb/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "unit_conversion_bytes_to_mb",
"threshold.name": "critical",
}
),
],
),
),
types.AlertTestCase(
name="test_unit_conversion_ms_to_second",
rule_path="alerts/test_scenarios/unit_conversion_ms_to_second/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/unit_conversion_ms_to_second/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "unit_conversion_ms_to_second",
"threshold.name": "critical",
}
),
],
),
),
]
# miscellaneous test cases: no data and multi threshold
TEST_RULES_MISCELLANEOUS = [
types.AlertTestCase(
name="test_no_data_rule_test",
rule_path="alerts/test_scenarios/no_data_rule_test/rule.json",
alert_data=[
types.AlertData(
type="metrics",
data_path="alerts/test_scenarios/no_data_rule_test/alert_data.jsonl",
),
],
alert_expectation=types.AlertExpectation(
should_alert=True,
wait_time_seconds=30,
expected_alerts=[
types.FiringAlert(
labels={
"alertname": "[No data] no_data_rule_test",
"nodata": "true",
}
),
],
),
),
# TODO: @abhishekhugetech enable the test for multi threshold rule, pylint: disable=W0511
# after the [issue](https://github.com/SigNoz/engineering-pod/issues/3934) with alertManager is resolved
# types.AlertTestCase(
# name="test_multi_threshold_rule_test",
# rule_path="alerts/test_scenarios/multi_threshold_rule_test/rule.json",
# alert_data=[
# types.AlertData(
# type="metrics",
# data_path="alerts/test_scenarios/multi_threshold_rule_test/alert_data.jsonl",
# ),
# ],
# alert_expectation=types.AlertExpectation(
# should_alert=True,
#             # the second alert will be fired with some delay from alertmanager's group_interval,
#             # so taking this into consideration, the wait time is 90 seconds (30s + 30s for the next alert + 30s buffer)
# wait_time_seconds=90,
# expected_alerts=[
# types.FiringAlert(
# labels={
# "alertname": "multi_threshold_rule_test",
# "threshold.name": "info",
# }
# ),
# types.FiringAlert(
# labels={
# "alertname": "multi_threshold_rule_test",
# "threshold.name": "warning",
# }
# ),
# ],
# ),
# ),
]
logger = setup_logger(__name__)
@pytest.mark.parametrize(
"alert_test_case",
TEST_RULES_MATCH_TYPE_AND_COMPARE_OPERATORS
+ TEST_RULES_UNIT_CONVERSION
+ TEST_RULES_MISCELLANEOUS,
ids=lambda alert_test_case: alert_test_case.name,
)
def test_basic_alert_rule_conditions(
# Notification channel related fixtures
notification_channel: types.TestContainerDocker,
make_http_mocks: Callable[[types.TestContainerDocker, List[Mapping]], None],
create_webhook_notification_channel: Callable[[str, str, dict, bool], str],
# Alert rule related fixtures
create_alert_rule: Callable[[dict], str],
# Alert data insertion related fixtures
insert_alert_data: Callable[[List[types.AlertData], datetime], None],
alert_test_case: types.AlertTestCase,
):
# Prepare notification channel name and webhook endpoint
notification_channel_name = str(uuid.uuid4())
webhook_endpoint_path = f"/alert/{notification_channel_name}"
notification_url = notification_channel.container_configs["8080"].get(
webhook_endpoint_path
)
logger.info("notification_url: %s", {"notification_url": notification_url})
# register the mock endpoint in notification channel
make_http_mocks(
notification_channel,
[
Mapping(
request=MappingRequest(
method=HttpMethods.POST,
url=webhook_endpoint_path,
),
response=MappingResponse(
status=200,
json_body={},
),
persistent=False,
)
],
)
# Create an alert channel using the given route
create_webhook_notification_channel(
channel_name=notification_channel_name,
webhook_url=notification_url,
http_config={},
send_resolved=False,
)
logger.info(
"alert channel created with name: %s",
{"notification_channel_name": notification_channel_name},
)
# Insert alert data
insert_alert_data(
alert_test_case.alert_data,
base_time=datetime.now(tz=timezone.utc) - timedelta(minutes=5),
)
# Create Alert Rule
rule_path = get_testdata_file_path(alert_test_case.rule_path)
with open(rule_path, "r", encoding="utf-8") as f:
rule_data = json.loads(f.read())
# Update the channel name in the rule data
update_rule_channel_name(rule_data, notification_channel_name)
rule_id = create_alert_rule(rule_data)
logger.info(
"rule created with id: %s",
{"rule_id": rule_id, "rule_name": rule_data["alert"]},
)
# Verify alert expectation
verify_webhook_alert_expectation(
notification_channel,
notification_channel_name,
alert_test_case.alert_expectation,
)
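# Minimal sketch (assumptions only, not the fixture implementation) of what
# verify_webhook_alert_expectation boils down to: poll the mock channel's
# request journal until every expected label set shows up or the wait time
# elapses. The journal URL and response shape below are hypothetical.
def _poll_for_expected_alerts_sketch(
    journal_url: str, expected_label_sets: List[dict], deadline_seconds: int
) -> bool:
    import time

    import requests

    start = time.monotonic()
    while time.monotonic() - start < deadline_seconds:
        # hypothetical journal payload: {"alerts": [{"labels": {...}}, ...]}
        received = requests.get(journal_url, timeout=5).json().get("alerts", [])
        got = [alert.get("labels", {}) for alert in received]
        if all(
            any(expected.items() <= labels.items() for labels in got)
            for expected in expected_label_sets
        ):
            return True
        time.sleep(2)
    return False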

View File

@@ -1,6 +1,7 @@
from http import HTTPStatus
from typing import Callable
import pytest
import requests
from fixtures import types
@@ -20,9 +21,7 @@ def test_generate_connection_url(
# Get authentication token for admin user
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
cloud_provider = "aws"
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
)
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
# Prepare request payload
request_payload = {
@@ -66,7 +65,9 @@ def test_generate_connection_url(
data = response_data["data"]
# Assert account_id is a valid UUID format
assert len(data["account_id"]) > 0, "account_id should be a non-empty string (UUID)"
assert (
len(data["account_id"]) > 0
), "account_id should be a non-empty string (UUID)"
# Assert connection_url contains expected CloudFormation parameters
connection_url = data["connection_url"]
@@ -110,9 +111,7 @@ def test_generate_connection_url_unsupported_provider(
# Try with GCP (unsupported)
cloud_provider = "gcp"
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
)
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts/generate-connection-url"
request_payload = {
"account_config": {"regions": ["us-central1"]},
@@ -140,5 +139,5 @@ def test_generate_connection_url_unsupported_provider(
response_data = response.json()
assert "error" in response_data, "Response should contain 'error' field"
assert (
"unsupported cloud provider" in response_data["error"].lower()
"invalid cloud provider: gcp" in response_data["error"]["message"]
), "Error message should indicate unsupported provider"

View File

@@ -1,13 +1,17 @@
import uuid
from http import HTTPStatus
from typing import Callable
import uuid
import pytest
import requests
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.cloudintegrationsutils import simulate_agent_checkin
from fixtures.logger import setup_logger
from fixtures.cloudintegrations import (
create_cloud_integration_account,
)
from fixtures.cloudintegrationsutils import simulate_agent_checkin
logger = setup_logger(__name__)
@@ -36,9 +40,8 @@ def test_list_connected_accounts_empty(
data = response_data.get("data", response_data)
assert "accounts" in data, "Response should contain 'accounts' field"
assert isinstance(data["accounts"], list), "Accounts should be a list"
assert (
len(data["accounts"]) == 0
), "Accounts list should be empty when no accounts are connected"
assert len(data["accounts"]) == 0, "Accounts list should be empty when no accounts are connected"
def test_list_connected_accounts_with_account(
@@ -57,9 +60,7 @@ def test_list_connected_accounts_with_account(
# Simulate agent check-in to mark as connected
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# List accounts
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts"
@@ -86,6 +87,7 @@ def test_list_connected_accounts_with_account(
assert "status" in account, "Account should have status field"
def test_get_account_status(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -161,16 +163,16 @@ def test_update_account_config(
# Simulate agent check-in to mark as connected
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# Update account configuration
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{account_id}/config"
)
updated_config = {"config": {"regions": ["us-east-1", "us-west-2", "eu-west-1"]}}
updated_config = {
"config": {"regions": ["us-east-1", "us-west-2", "eu-west-1"]}
}
response = requests.post(
signoz.self.host_configs["8080"].get(endpoint),
@@ -196,6 +198,7 @@ def test_update_account_config(
timeout=10,
)
list_response_data = list_response.json()
list_data = list_response_data.get("data", list_response_data)
account = next((a for a in list_data["accounts"] if a["id"] == account_id), None)
@@ -210,6 +213,7 @@ def test_update_account_config(
}, "Regions should match updated config"
def test_disconnect_account(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -226,9 +230,7 @@ def test_disconnect_account(
# Simulate agent check-in to mark as connected
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# Disconnect the account
endpoint = (
@@ -260,9 +262,8 @@ def test_disconnect_account(
disconnected_account = next(
(a for a in list_data["accounts"] if a["id"] == account_id), None
)
assert (
disconnected_account is None
), f"Account {account_id} should be removed from connected accounts"
assert disconnected_account is None, f"Account {account_id} should be removed from connected accounts"
def test_disconnect_account_not_found(
@@ -276,7 +277,9 @@ def test_disconnect_account_not_found(
cloud_provider = "aws"
fake_account_id = "00000000-0000-0000-0000-000000000000"
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{fake_account_id}/disconnect"
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/accounts/{fake_account_id}/disconnect"
)
response = requests.post(
signoz.self.host_configs["8080"].get(endpoint),
@@ -289,6 +292,7 @@ def test_disconnect_account_not_found(
), f"Expected 404, got {response.status_code}"
def test_list_accounts_unsupported_provider(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument

View File

@@ -1,13 +1,17 @@
import uuid
from http import HTTPStatus
from typing import Callable
import uuid
import pytest
import requests
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.cloudintegrationsutils import simulate_agent_checkin
from fixtures.logger import setup_logger
from fixtures.cloudintegrations import (
create_cloud_integration_account,
)
from fixtures.cloudintegrationsutils import simulate_agent_checkin
logger = setup_logger(__name__)
@@ -46,6 +50,7 @@ def test_list_services_without_account(
assert "icon" in service, "Service should have 'icon' field"
def test_list_services_with_account(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -61,9 +66,7 @@ def test_list_services_with_account(
account_id = account_data["account_id"]
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# List services for the account
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services?cloud_account_id={cloud_account_id}"
@@ -91,6 +94,7 @@ def test_list_services_with_account(
assert "icon" in service, "Service should have 'icon' field"
def test_get_service_details_without_account(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -137,6 +141,7 @@ def test_get_service_details_without_account(
assert isinstance(data["assets"], dict), "Assets should be a dictionary"
def test_get_service_details_with_account(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -152,9 +157,7 @@ def test_get_service_details_with_account(
account_id = account_data["account_id"]
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# Get list of services first
list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
@@ -190,7 +193,7 @@ def test_get_service_details_with_account(
assert "overview" in data, "Service details should have 'overview' field"
assert "assets" in data, "Service details should have 'assets' field"
assert "config" in data, "Service details should have 'config' field"
assert "status" in data, "Config should have 'status' field"
def test_get_service_details_invalid_service(
@@ -216,6 +219,7 @@ def test_get_service_details_invalid_service(
), f"Expected 404, got {response.status_code}"
def test_list_services_unsupported_provider(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -238,6 +242,7 @@ def test_list_services_unsupported_provider(
), f"Expected 400, got {response.status_code}"
def test_update_service_config(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -253,9 +258,7 @@ def test_update_service_config(
account_id = account_data["account_id"]
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# Get list of services to pick a valid service ID
list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
@@ -270,9 +273,7 @@ def test_update_service_config(
service_id = list_data["services"][0]["id"]
# Update service configuration
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"
)
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"
config_payload = {
"cloud_account_id": cloud_account_id,
@@ -304,6 +305,7 @@ def test_update_service_config(
assert "logs" in data["config"], "Config should contain 'logs' field"
def test_update_service_config_without_account(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -326,9 +328,7 @@ def test_update_service_config_without_account(
service_id = list_data["services"][0]["id"]
# Try to update config with non-existent account
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"
)
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"
fake_cloud_account_id = str(uuid.uuid4())
config_payload = {
@@ -346,8 +346,9 @@ def test_update_service_config_without_account(
)
assert (
response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
), f"Expected 500 for non-existent account, got {response.status_code}"
response.status_code == HTTPStatus.NOT_FOUND
), f"Expected 400 for non-existent account, got {response.status_code}"
def test_update_service_config_invalid_service(
@@ -365,15 +366,11 @@ def test_update_service_config_invalid_service(
account_id = account_data["account_id"]
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# Try to update config for invalid service
fake_service_id = "non-existent-service"
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/services/{fake_service_id}/config"
)
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{fake_service_id}/config"
config_payload = {
"cloud_account_id": cloud_account_id,
@@ -394,6 +391,7 @@ def test_update_service_config_invalid_service(
), f"Expected 404 for invalid service, got {response.status_code}"
def test_update_service_config_disable_service(
signoz: types.SigNoz,
create_user_admin: types.Operation, # pylint: disable=unused-argument
@@ -409,9 +407,7 @@ def test_update_service_config_disable_service(
account_id = account_data["account_id"]
cloud_account_id = str(uuid.uuid4())
simulate_agent_checkin(
signoz, admin_token, cloud_provider, account_id, cloud_account_id
)
simulate_agent_checkin(signoz, admin_token, cloud_provider, account_id, cloud_account_id)
# Get a valid service
list_endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services"
@@ -425,9 +421,7 @@ def test_update_service_config_disable_service(
service_id = list_data["services"][0]["id"]
# First enable the service
endpoint = (
f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"
)
endpoint = f"/api/v1/cloud-integrations/{cloud_provider}/services/{service_id}/config"
enable_payload = {
"cloud_account_id": cloud_account_id,

View File

@@ -2,11 +2,11 @@
Look at the multi_temporality_counters_1h.jsonl file for the relevant data
"""
import random
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
import random
from typing import Any, Callable, List
import pytest
from fixtures import types
@@ -21,13 +21,8 @@ from fixtures.querier import (
from fixtures.utils import get_testdata_file_path
MULTI_TEMPORALITY_FILE = get_testdata_file_path("multi_temporality_counters_1h.jsonl")
MULTI_TEMPORALITY_FILE_10h = get_testdata_file_path(
"multi_temporality_counters_10h.jsonl"
)
MULTI_TEMPORALITY_FILE_24h = get_testdata_file_path(
"multi_temporality_counters_24h.jsonl"
)
MULTI_TEMPORALITY_FILE_10h = get_testdata_file_path("multi_temporality_counters_10h.jsonl")
MULTI_TEMPORALITY_FILE_24h = get_testdata_file_path("multi_temporality_counters_24h.jsonl")
@pytest.mark.parametrize(
"time_aggregation, expected_value_at_31st_minute, expected_value_at_32nd_minute, steady_value",
@@ -44,7 +39,7 @@ def test_with_steady_values_and_reset(
time_aggregation: str,
expected_value_at_31st_minute: float,
expected_value_at_32nd_minute: float,
steady_value: float,
steady_value: float
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
@@ -74,21 +69,22 @@ def test_with_steady_values_and_reset(
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
assert len(result_values) >= 59
# the counter reset happened at 31st minute
assert result_values[30]["value"] == expected_value_at_31st_minute
assert result_values[31]["value"] == expected_value_at_32nd_minute
assert (
result_values[30]["value"] == expected_value_at_31st_minute
)
assert (
result_values[31]["value"] == expected_value_at_32nd_minute
)
assert (
result_values[39]["value"] == steady_value
) # 39th minute is when cumulative shifts to delta
count_of_steady_rate = sum(1 for v in result_values if v["value"] == steady_value)
assert (
count_of_steady_rate >= 56
) # 59 - (1 reset + 1 high rate + 1 at the beginning)
# All rates should be non-negative (stale periods = 0 rate)
for v in result_values:
assert (
v["value"] >= 0
), f"{time_aggregation} should not be negative: {v['value']}"
assert v["value"] >= 0, f"{time_aggregation} should not be negative: {v['value']}"
@pytest.mark.parametrize(
"time_aggregation, stable_health_value, stable_products_value, stable_checkout_value, spike_checkout_value, stable_orders_value, spike_users_value",
@@ -165,26 +161,20 @@ def test_group_by_endpoint(
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(
1 for v in health_values if v["value"] == stable_health_value
)
count_steady_health = sum(1 for v in health_values if v["value"] == stable_health_value)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
# all /health rates should be stable except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert (
v["value"] == stable_health_value
), f"Expected /health rate {stable_health_value}, got {v['value']}"
assert v["value"] == stable_health_value, f"Expected /health rate {stable_health_value}, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(
1 for v in products_values if v["value"] == stable_products_value
)
count_steady_products = sum(1 for v in products_values if v["value"] == stable_products_value)
# most values should be stable, some boundary values differ due to 10-min gap
assert (
@@ -192,9 +182,7 @@ def test_group_by_endpoint(
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
# check that non-stable values are due to gap averaging (should be lower)
gap_boundary_values = [
v["value"] for v in products_values if v["value"] != stable_products_value
]
gap_boundary_values = [v["value"] for v in products_values if v["value"] != stable_products_value]
for val in gap_boundary_values:
assert (
0 < val < stable_products_value
@@ -205,16 +193,12 @@ def test_group_by_endpoint(
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(
1 for v in checkout_values if v["value"] == stable_checkout_value
)
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == stable_checkout_value)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(
1 for v in checkout_values if v["value"] == spike_checkout_value
)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == spike_checkout_value)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
@@ -236,16 +220,12 @@ def test_group_by_endpoint(
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(
1 for v in orders_values if v["value"] == stable_orders_value
)
count_steady_orders = sum(1 for v in orders_values if v["value"] == stable_orders_value)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [
v["value"] for v in orders_values if v["value"] != stable_orders_value
]
non_standard_orders = [v["value"] for v in orders_values if v["value"] != stable_orders_value]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
@@ -272,7 +252,6 @@ def test_group_by_endpoint(
count_increment_rate >= 8
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"time_aggregation, expected_value_at_30th_minute, expected_value_at_31st_minute, value_at_switch",
[
@@ -288,7 +267,7 @@ def test_for_service_with_switch(
time_aggregation: str,
expected_value_at_30th_minute: float,
expected_value_at_31st_minute: float,
value_at_switch: float,
value_at_switch: float
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
@@ -317,18 +296,21 @@ def test_for_service_with_switch(
data = response.json()
result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
assert len(result_values) >= 60
assert result_values[30]["value"] == expected_value_at_30th_minute # 0.183
assert result_values[31]["value"] == expected_value_at_31st_minute # 0.183
assert result_values[38]["value"] == value_at_switch # 0.25
assert (
result_values[39]["value"] == value_at_switch # 0.25
) # 39th minute is when cumulative shifts to delta
assert (
    result_values[30]["value"] == expected_value_at_30th_minute # 0.183
)
assert (
result_values[31]["value"] == expected_value_at_31st_minute # 0.183
)
assert (
result_values[38]["value"] == value_at_switch # 0.25
)
assert (
result_values[39]["value"] == value_at_switch # 0.25
) # 39th minute is when cumulative shifts to delta
# All rates should be non-negative (stale periods = 0 rate)
for v in result_values:
assert (
v["value"] >= 0
), f"{time_aggregation} should not be negative: {v['value']}"
assert v["value"] >= 0, f"{time_aggregation} should not be negative: {v['value']}"
@pytest.mark.parametrize(
"time_aggregation, expected_value",
@@ -373,7 +355,6 @@ def test_for_week_long_time_range(
for value in result_values[1:]:
assert value["value"] == expected_value
@pytest.mark.parametrize(
"time_aggregation, expected_value",
[

View File

@@ -1,15 +1,15 @@
import pytest
from http import HTTPStatus
from typing import Callable
import pytest
import requests
from sqlalchemy import sql
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.types import Operation, SigNoz
ANONYMOUS_USER_ID = "00000000-0000-0000-0000-000000000000"
def test_managed_roles_create_on_register(
signoz: SigNoz,
@@ -17,7 +17,7 @@ def test_managed_roles_create_on_register(
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# get the list of all roles.
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/roles"),
@@ -32,22 +32,18 @@ def test_managed_roles_create_on_register(
# since this check happens immediately post registration, all the managed roles should be present.
assert len(data) == 4
role_names = {role["name"] for role in data}
expected_names = {
"signoz-admin",
"signoz-viewer",
"signoz-editor",
"signoz-anonymous",
}
expected_names = {"signoz-admin", "signoz-viewer", "signoz-editor", "signoz-anonymous"}
# compare as sets since this check is order-insensitive; a direct list match is order-sensitive.
assert set(role_names) == expected_names
def test_root_user_signoz_admin_assignment(
request: pytest.FixtureRequest,
signoz: SigNoz,
create_user_admin: Operation, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Get the user from the /user/me endpoint and extract the id
@@ -68,16 +64,14 @@ def test_root_user_signoz_admin_assignment(
# this validates to some extent that the role assignment is complete under the assumption that middleware is functioning as expected.
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
# Loop over the roles and get the org_id and id for signoz-admin role
roles = response.json()["data"]
admin_role_entry = next(
(role for role in roles if role["name"] == "signoz-admin"), None
)
admin_role_entry = next((role for role in roles if role["name"] == "signoz-admin"), None)
assert admin_role_entry is not None
org_id = admin_role_entry["orgId"]
# to be super sure of authorization server, let's validate the tuples in DB as well.
# todo[@vikrantgupta25]: replace this with role members handler once built.
with signoz.sqlstore.conn.connect() as conn:
# verify the entry present for role assignment
@@ -86,14 +80,15 @@ def test_root_user_signoz_admin_assignment(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id"),
{"object_id": tuple_object_id},
)
tuple_row = tuple_result.mappings().fetchone()
assert tuple_row is not None
# check that the tuple is for role assignment
assert tuple_row["object_type"] == "role"
assert tuple_row["relation"] == "assignee"
assert tuple_row['object_type'] == "role"
assert tuple_row['relation'] == "assignee"
if request.config.getoption("--sqlstore-provider") == "sqlite":
if request.config.getoption("--sqlstore-provider") == 'sqlite':
user_object_id = f"organization/{org_id}/user/{user_id}"
assert tuple_row["user_object_type"] == "user"
assert tuple_row["user_object_id"] == user_object_id
@@ -102,13 +97,13 @@ def test_root_user_signoz_admin_assignment(
assert tuple_row["user_type"] == "user"
assert tuple_row["_user"] == _user
def test_anonymous_user_signoz_anonymous_assignment(
request: pytest.FixtureRequest,
signoz: SigNoz,
create_user_admin: Operation, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
response = requests.get(
@@ -120,16 +115,14 @@ def test_anonymous_user_signoz_anonymous_assignment(
# this validates to some extent that the role assignment is complete under the assumption that middleware is functioning as expected.
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
# Loop over the roles and get the org_id and id for signoz-admin role
roles = response.json()["data"]
admin_role_entry = next(
(role for role in roles if role["name"] == "signoz-anonymous"), None
)
admin_role_entry = next((role for role in roles if role["name"] == "signoz-anonymous"), None)
assert admin_role_entry is not None
org_id = admin_role_entry["orgId"]
# to be super sure of authorization server, let's validate the tuples in DB as well.
# todo[@vikrantgupta25]: replace this with role members handler once built.
with signoz.sqlstore.conn.connect() as conn:
# verify the entry present for role assignment
@@ -138,14 +131,15 @@ def test_anonymous_user_signoz_anonymous_assignment(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id"),
{"object_id": tuple_object_id},
)
tuple_row = tuple_result.mappings().fetchone()
assert tuple_row is not None
# check that the tuple is for role assignment
assert tuple_row["object_type"] == "role"
assert tuple_row["relation"] == "assignee"
assert tuple_row['object_type'] == "role"
assert tuple_row['relation'] == "assignee"
if request.config.getoption("--sqlstore-provider") == "sqlite":
if request.config.getoption("--sqlstore-provider") == 'sqlite':
user_object_id = f"organization/{org_id}/anonymous/{ANONYMOUS_USER_ID}"
assert tuple_row["user_object_type"] == "anonymous"
assert tuple_row["user_object_id"] == user_object_id
@@ -153,3 +147,5 @@ def test_anonymous_user_signoz_anonymous_assignment(
_user = f"anonymous:organization/{org_id}/anonymous/{ANONYMOUS_USER_ID}"
assert tuple_row["user_type"] == "user"
assert tuple_row["_user"] == _user

View File

@@ -1,16 +1,11 @@
import pytest
from http import HTTPStatus
from typing import Callable
import pytest
import requests
from sqlalchemy import sql
from fixtures.auth import (
USER_ADMIN_EMAIL,
USER_ADMIN_PASSWORD,
USER_EDITOR_EMAIL,
USER_EDITOR_PASSWORD,
)
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD, USER_EDITOR_EMAIL, USER_EDITOR_PASSWORD
from fixtures.types import Operation, SigNoz
@@ -21,7 +16,7 @@ def test_user_invite_accept_role_grant(
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# invite a user as editor
invite_payload = {
"email": USER_EDITOR_EMAIL,
@@ -35,7 +30,7 @@ def test_user_invite_accept_role_grant(
)
assert invite_response.status_code == HTTPStatus.CREATED
invite_token = invite_response.json()["data"]["token"]
# accept the invite for editor
accept_payload = {
"token": invite_token,
@@ -45,7 +40,7 @@ def test_user_invite_accept_role_grant(
signoz.self.host_configs["8080"].get("/api/v1/invite/accept"),
json=accept_payload,
timeout=2,
)
assert accept_response.status_code == HTTPStatus.CREATED
# Login with editor email and password
@@ -58,6 +53,7 @@ def test_user_invite_accept_role_grant(
assert user_me_response.status_code == HTTPStatus.OK
editor_id = user_me_response.json()["data"]["id"]
# check the forbidden response for admin api for editor user
admin_roles_response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/roles"),
@@ -83,11 +79,11 @@ def test_user_invite_accept_role_grant(
)
tuple_row = tuple_result.mappings().fetchone()
assert tuple_row is not None
assert tuple_row["object_type"] == "role"
assert tuple_row["relation"] == "assignee"
assert tuple_row['object_type'] == "role"
assert tuple_row['relation'] == "assignee"
# verify the user tuple details depending on db provider
if request.config.getoption("--sqlstore-provider") == "sqlite":
if request.config.getoption("--sqlstore-provider") == 'sqlite':
user_object_id = f"organization/{org_id}/user/{editor_id}"
assert tuple_row["user_object_type"] == "user"
assert tuple_row["user_object_id"] == user_object_id
@@ -97,6 +93,7 @@ def test_user_invite_accept_role_grant(
assert tuple_row["_user"] == _user
def test_user_update_role_grant(
request: pytest.FixtureRequest,
signoz: SigNoz,
@@ -125,7 +122,9 @@ def test_user_update_role_grant(
org_id = roles_data[0]["orgId"]
# Update the user's role to viewer
update_payload = {"role": "VIEWER"}
update_payload = {
"role": "VIEWER"
}
update_response = requests.put(
signoz.self.host_configs["8080"].get(f"/api/v1/user/{editor_id}"),
json=update_payload,
@@ -140,9 +139,7 @@ def test_user_update_role_grant(
viewer_tuple_object_id = f"organization/{org_id}/role/signoz-viewer"
# Check there is no tuple for signoz-editor assignment
editor_tuple_result = conn.execute(
sql.text(
"SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"
),
sql.text("SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"),
{"object_id": editor_tuple_object_id},
)
for row in editor_tuple_result.mappings().fetchall():
@@ -155,15 +152,13 @@ def test_user_update_role_grant(
# Check that a tuple exists for signoz-viewer assignment
viewer_tuple_result = conn.execute(
sql.text(
"SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"
),
sql.text("SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"),
{"object_id": viewer_tuple_object_id},
)
row = viewer_tuple_result.mappings().fetchone()
assert row is not None
assert row["object_type"] == "role"
assert row["relation"] == "assignee"
assert row['object_type'] == "role"
assert row['relation'] == "assignee"
if request.config.getoption("--sqlstore-provider") == "sqlite":
user_object_id = f"organization/{org_id}/user/{editor_id}"
assert row["user_object_type"] == "user"
@@ -173,7 +168,6 @@ def test_user_update_role_grant(
assert row["user_type"] == "user"
assert row["_user"] == _user
def test_user_delete_role_revoke(
request: pytest.FixtureRequest,
signoz: SigNoz,
@@ -211,12 +205,10 @@ def test_user_delete_role_revoke(
with signoz.sqlstore.conn.connect() as conn:
tuple_result = conn.execute(
sql.text(
"SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"
),
sql.text("SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"),
{"object_id": tuple_object_id},
)
# there should NOT be any tuple for the current user assignment
tuple_rows = tuple_result.mappings().fetchall()
for row in tuple_rows:
@@ -225,4 +217,4 @@ def test_user_delete_role_revoke(
assert row["user_object_id"] != user_object_id
else:
_user = f"user:organization/{org_id}/user/{editor_id}"
assert row["_user"] != _user
assert row["_user"] != _user

View File

@@ -1,12 +0,0 @@
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:01:00+00:00","value":2,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:02:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:03:00+00:00","value":14,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:04:00+00:00","value":3,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:05:00+00:00","value":6,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:06:00+00:00","value":25,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:07:00+00:00","value":25,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:08:00+00:00","value":25,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:09:00+00:00","value":10,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:10:00+00:00","value":12,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:11:00+00:00","value":8,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_multi_threshold_rule_test","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:12:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

View File

@@ -1,83 +0,0 @@
{
"alert": "multi_threshold_rule_test",
"ruleType": "threshold_rule",
"alertType": "METRIC_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 30,
"matchType": "1",
"op": "1",
"channels": [
"test channel"
]
},
{
"name": "warning",
"target": 20,
"matchType": "1",
"op": "1",
"channels": [
"test channel"
]
},
{
"name": "info",
"target": 10,
"matchType": "1",
"op": "1",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "builder",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "metrics",
"aggregations": [
{
"metricName": "cpu_percent_multi_threshold_rule_test",
"timeAggregation": "avg",
"spaceAggregation": "max"
}
]
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}

View File

@@ -1,12 +0,0 @@
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":12,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":26,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":41,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":56,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":101,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":116,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":131,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":146,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":161,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_no_data_rule_test","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":176,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}

View File

@@ -1,70 +0,0 @@
{
"alert": "no_data_rule_test",
"ruleType": "threshold_rule",
"alertType": "METRIC_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 10,
"matchType": "1",
"op": "1",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "builder",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "metrics",
"filter": {
"expression": "service = 'server'"
},
"aggregations": [
{
"metricName": "request_total_no_data_rule_test",
"timeAggregation": "rate",
"spaceAggregation": "sum"
}
]
}
}
]
},
"selectedQueryName": "A",
"alertOnAbsent": true,
"absentFor": 1
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}

View File

@@ -1,12 +0,0 @@
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:01:00+00:00","value":12,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:02:00+00:00","value":14,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:04:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:05:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:06:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:07:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:08:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:09:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:10:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:11:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"cpu_percent_threshold_above_all_the_time","labels":{"host":"server-01","cpu":"cpu0"},"timestamp":"2026-01-29T10:12:00+00:00","value":15,"temporality":"Unspecified","type_":"Gauge","is_monotonic":false,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}


@@ -1,58 +0,0 @@
{
"alert": "threshold_above_all_the_time",
"ruleType": "promql_rule",
"alertType": "METRIC_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 10,
"matchType": "2",
"op": "1",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "promql",
"panelType": "graph",
"queries": [
{
"type": "promql",
"spec": {
"name": "A",
"query": "{\"cpu_percent_threshold_above_all_the_time\"}"
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}


@@ -1,12 +0,0 @@
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":19,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":35,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":36,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":37,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":38,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":39,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_at_least_once","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":40,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}


@@ -1,58 +0,0 @@
{
"alert": "threshold_above_at_least_once",
"ruleType": "threshold_rule",
"alertType": "METRIC_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 10,
"matchType": "1",
"op": "1",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "clickhouse_sql",
"panelType": "graph",
"queries": [
{
"type": "clickhouse_sql",
"spec": {
"name": "A",
"query": "WITH __temporal_aggregation_cte AS (\n SELECT \n fingerprint, \n toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(60)) AS ts, \n avg(value) AS per_series_value \n FROM signoz_metrics.distributed_samples_v4 AS points \n INNER JOIN (\n SELECT fingerprint \n FROM signoz_metrics.time_series_v4 \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND LOWER(temporality) LIKE LOWER('cumulative') \n AND __normalized = false \n GROUP BY fingerprint\n ) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint \n WHERE metric_name IN ('request_total_threshold_above_at_least_once') \n AND unix_milli >= {{.start_timestamp_ms}} \n AND unix_milli < {{.end_timestamp_ms}} \n GROUP BY fingerprint, ts \n ORDER BY fingerprint, ts\n), \n__spatial_aggregation_cte AS (\n SELECT \n ts, \n avg(per_series_value) AS value \n FROM __temporal_aggregation_cte \n WHERE isNaN(per_series_value) = 0 \n GROUP BY ts\n) \nSELECT * FROM __spatial_aggregation_cte \nORDER BY ts"
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}
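The embedded ClickHouse query is a two-stage aggregation: __temporal_aggregation_cte averages each series (fingerprint) into 60-second buckets, and __spatial_aggregation_cte then averages across series per bucket. A minimal Python sketch of the same shape, with made-up sample rows standing in for distributed_samples_v4:

```python
# Minimal emulation of the query's two CTE stages; the sample tuples are
# made up and stand in for signoz_metrics.distributed_samples_v4 rows.
from collections import defaultdict
from statistics import mean

samples = [("s1", 0, 1.0), ("s1", 30, 3.0), ("s2", 10, 5.0)]  # (fingerprint, unix_s, value)

# __temporal_aggregation_cte: avg(value) per fingerprint per 60 s bucket
temporal = defaultdict(list)
for fp, ts, v in samples:
    temporal[(fp, ts - ts % 60)].append(v)
per_series = {key: mean(vals) for key, vals in temporal.items()}

# __spatial_aggregation_cte: avg(per_series_value) per bucket across series
spatial = defaultdict(list)
for (fp, bucket), v in per_series.items():
    spatial[bucket].append(v)
print({bucket: mean(vals) for bucket, vals in spatial.items()})  # {0: 3.5}
```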


@@ -1,20 +0,0 @@
{ "timestamp": "2026-01-29T10:00:00.000000Z", "duration": "PT0.8S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a1", "span_id": "a1b2c3d4e5f6g7h8", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:00:30.000000Z", "duration": "PT1.2S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a2", "span_id": "a2b3c4d5e6f7g8h9", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:01:00.000000Z", "duration": "PT0.9S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a3", "span_id": "a3b4c5d6e7f8g9h0", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:01:30.000000Z", "duration": "PT1.5S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a4", "span_id": "a4b5c6d7e8f9g0h1", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:02:00.000000Z", "duration": "PT1.1S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a5", "span_id": "a5b6c7d8e9f0g1h2", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:02:30.000000Z", "duration": "PT0.7S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a6", "span_id": "a6b7c8d9e0f1g2h3", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:03:00.000000Z", "duration": "PT1.8S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a7", "span_id": "a7b8c9d0e1f2g3h4", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:03:30.000000Z", "duration": "PT1.3S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a8", "span_id": "a8b9c0d1e2f3g4h5", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:04:00.000000Z", "duration": "PT0.6S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6a9", "span_id": "a9b0c1d2e3f4g5h6", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:04:30.000000Z", "duration": "PT1.4S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b1", "span_id": "b1c2d3e4f5g6h7i8", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:05:00.000000Z", "duration": "PT1.6S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b2", "span_id": "b2c3d4e5f6g7h8i9", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" },"attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:05:30.000000Z", "duration": "PT0.85S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b3", "span_id": "b3c4d5e6f7g8h9i0", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:06:00.000000Z", "duration": "PT1.7S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b4", "span_id": "b4c5d6e7f8g9h0i1", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:06:30.000000Z", "duration": "PT1.25S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b5", "span_id": "b5c6d7e8f9g0h1i2", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:07:00.000000Z", "duration": "PT0.95S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b6", "span_id": "b6c7d8e9f0g1h2i3", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:07:30.000000Z", "duration": "PT1.9S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b7", "span_id": "b7c8d9e0f1g2h3i4", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:08:00.000000Z", "duration": "PT1.35S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b8", "span_id": "b8c9d0e1f2g3h4i5", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:08:30.000000Z", "duration": "PT0.75S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6b9", "span_id": "b9c0d1e2f3g4h5i6", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:09:00.000000Z", "duration": "PT1.55S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6c1", "span_id": "c1d2e3f4g5h6i7j8", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }
{ "timestamp": "2026-01-29T10:10:00.000000Z", "duration": "PT1.65S", "trace_id": "491f6d3d6b0a1f9e8a71b2c3d4e5f6c2", "span_id": "c2d3e4f5g6h7i8j9", "parent_span_id": "", "name": "POST /order", "kind": 2, "status_code": 1, "status_message": "", "resources": { "deployment.environment": "production", "service.name": "order-service", "os.type": "linux", "host.name": "linux-000" }, "attributes": { "net.transport": "IP.TCP", "http.scheme": "http", "http.user_agent": "Integration Test", "http.request.method": "POST", "http.response.status_code": "200", "http.request.path": "/order" } }


@@ -1,68 +0,0 @@
{
"alert": "threshold_above_average",
"ruleType": "threshold_rule",
"alertType": "TRACES_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 1,
"matchType": "3",
"op": "1",
"channels": [
"test channel"
],
"targetUnit": "s"
}
]
},
"compositeQuery": {
"queryType": "builder",
"unit": "ns",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"filter": {
"expression": "http.request.path = '/order'"
},
"aggregations": [
{
"expression": "p90(duration_nano)"
}
]
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}
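As a rough sanity check (not the product's evaluation path), the ISO-8601 durations in the span fixture above can be reduced to seconds and ranked; the p90 sits well above the rule's 1 s critical target:

```python
# Durations hand-copied from the "PT<seconds>S" fields in the fixture above.
durations = [0.8, 1.2, 0.9, 1.5, 1.1, 0.7, 1.8, 1.3, 0.6, 1.4,
             1.6, 0.85, 1.7, 1.25, 0.95, 1.9, 1.35, 0.75, 1.55, 1.65]
ranked = sorted(durations)
p90 = ranked[int(0.9 * (len(ranked) - 1))]  # nearest-rank; conventions vary
print(p90)  # 1.7 s — above the rule's 1 s critical target
```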


@@ -1,24 +0,0 @@
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:01:00+00:00","value":1,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:02:00+00:00","value":2,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:03:00+00:00","value":3,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:04:00+00:00","value":4,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":305,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:05:00+00:00","value":305,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":306,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:06:00+00:00","value":306,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":307,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:07:00+00:00","value":307,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":308,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:08:00+00:00","value":308,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":309,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:09:00+00:00","value":309,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":310,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:10:00+00:00","value":310,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":311,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:11:00+00:00","value":311,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"api","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":312,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"request_total_threshold_above_in_total","labels":{"service":"server","endpoint":"/health","status_code":"200"},"timestamp":"2026-01-29T10:12:00+00:00","value":312,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}


@@ -1,74 +0,0 @@
{
"alert": "threshold_above_in_total",
"ruleType": "threshold_rule",
"alertType": "METRIC_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 5,
"matchType": "4",
"op": "1",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "builder",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "metrics",
"groupBy": [
{
"name": "service",
"fieldDataType": "",
"fieldContext": ""
}
],
"aggregations": [
{
"metricName": "request_total_threshold_above_in_total",
"timeAggregation": "rate",
"spaceAggregation": "sum"
}
]
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [
"service"
],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}
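The in_total fixture pairs this rule with two cumulative series (api and server) that both jump from 4 to 305 at 10:05. A rough per-series rate check over one of them — illustrative arithmetic only, which is presumably how the "in total" match clears the target of 5 once summed over the window and across both services:

```python
# Cumulative counts per minute for the "api" series in the fixture above.
counts = [1, 2, 3, 4, 305, 306, 307, 308, 309, 310, 311, 312]
rates = [(b - a) / 60 for a, b in zip(counts, counts[1:])]  # per-second rate
print([round(r, 3) for r in rates])  # ~0.017/s everywhere except ~5.017/s at 10:05
```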


@@ -1,12 +0,0 @@
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:01:00+00:00","value":5,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:02:00+00:00","value":10,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:03:00+00:00","value":15,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:04:00+00:00","value":20,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:05:00+00:00","value":31,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:06:00+00:00","value":46,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:07:00+00:00","value":58,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:08:00+00:00","value":71,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:09:00+00:00","value":76,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:10:00+00:00","value":81,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:11:00+00:00","value":86,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}
{"metric_name":"disk_usage_threshold_above_last","labels":{"device":"/dev/sda1","mountpoint":"/"},"timestamp":"2026-01-29T10:12:00+00:00","value":91,"temporality":"Cumulative","type_":"Sum","is_monotonic":true,"flags":0,"description":"","unit":"","env":"default","resource_attrs":{},"scope_attrs":{}}


@@ -1,65 +0,0 @@
{
"alert": "threshold_above_last",
"ruleType": "threshold_rule",
"alertType": "METRIC_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 10,
"matchType": "5",
"op": "1",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "builder",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "metrics",
"aggregations": [
{
"metricName": "disk_usage_threshold_above_last",
"timeAggregation": "latest",
"spaceAggregation": "max"
}
]
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}


@@ -1,18 +0,0 @@
{ "timestamp": "2026-01-29T10:00:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:00:02.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:01:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Database connection established", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:01:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:02:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "API request received", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:02:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:03:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Request validation completed", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:03:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:04:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Cache updated successfully", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:04:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:05:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Rate limit check passed", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:05:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:06:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Authentication token validated", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:06:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:07:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Query executed successfully", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:08:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Response sent to client", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:09:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Metrics collected", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:10:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Background job started", "severity_text": "INFO" }


@@ -1,66 +0,0 @@
{
"alert": "threshold_below_all_the_time",
"ruleType": "threshold_rule",
"alertType": "LOGS_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 10,
"matchType": "2",
"op": "2",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "builder",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "logs",
"filter": {
"expression": "body CONTAINS 'payment success'"
},
"aggregations": [
{
"expression": "count()"
}
]
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}
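Against the log fixture above, the 'payment success' count stays below the target of 10 in every rolling 5-minute window, which is the condition this "below, all the time" rule encodes. A rough check, with match times hand-copied from the fixture:

```python
# Seconds after 10:00 for each 'payment success' line in the fixture above.
matches = [0, 2, 90, 150, 210, 270, 330, 390]
worst = max(sum(start <= t < start + 300 for t in matches)
            for start in range(0, 601, 30))
print(worst)  # 6 — the count never reaches the target of 10 in any 5 m window
```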


@@ -1,20 +0,0 @@
{ "timestamp": "2026-01-29T10:00:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "User login successful", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:00:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:01:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:01:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Database connection established", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:02:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:02:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:03:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "API request received", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:03:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:04:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:04:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Cache updated successfully", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:05:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:05:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Request validation completed", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:06:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:06:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Authentication token validated", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:07:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "payment success", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:07:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Query executed successfully", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:08:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Response sent to client", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:08:30.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Health check endpoint called", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:09:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Metrics collected", "severity_text": "INFO" }
{ "timestamp": "2026-01-29T10:10:00.000000Z", "resources": { "service.name": "payment-service" }, "attributes": { "code.file": "payment_handler.py" }, "body": "Background job started", "severity_text": "INFO" }


@@ -1,66 +0,0 @@
{
"alert": "threshold_below_at_least_once",
"ruleType": "threshold_rule",
"alertType": "LOGS_BASED_ALERT",
"condition": {
"thresholds": {
"kind": "basic",
"spec": [
{
"name": "critical",
"target": 10,
"matchType": "1",
"op": "2",
"channels": [
"test channel"
]
}
]
},
"compositeQuery": {
"queryType": "builder",
"panelType": "graph",
"queries": [
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "logs",
"filter": {
"expression": "body CONTAINS 'payment success'"
},
"aggregations": [
{
"expression": "count()"
}
]
}
}
]
},
"selectedQueryName": "A"
},
"evaluation": {
"kind": "rolling",
"spec": {
"evalWindow": "5m0s",
"frequency": "15s"
}
},
"labels": {},
"annotations": {
"description": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})",
"summary": "This alert is fired when the defined metric (current value: {{$value}}) crosses the threshold ({{$threshold}})"
},
"notificationSettings": {
"groupBy": [],
"usePolicy": false,
"renotify": {
"enabled": false,
"interval": "30m",
"alertStates": []
}
},
"version": "v5",
"schemaVersion": "v2alpha1"
}
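Read across all of the deleted rule fixtures, the numeric matchType and op codes line up with the scenario names. The mapping below is inferred from those names alone — an assumption, not a documented enum:

```python
# Inferred from the fixture names in this diff alone — an assumption,
# not a documented SigNoz enum.
MATCH_TYPE = {"1": "at least once", "2": "all the time",
              "3": "on average", "4": "in total", "5": "last"}
OP = {"1": "above", "2": "below"}
```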

Some files were not shown because too many files have changed in this diff.