mirror of
https://github.com/SigNoz/signoz.git
synced 2026-02-27 02:32:53 +00:00
Compare commits
18 Commits
fix/remove
...
nitya/clou
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a9d33e83a2 | ||
|
|
afcf2c6053 | ||
|
|
82c54b1d36 | ||
|
|
39f5fb7290 | ||
|
|
6ec2989e5c | ||
|
|
016da679b9 | ||
|
|
ff028e366b | ||
|
|
ec59fecdda | ||
|
|
c579614d56 | ||
|
|
78ba2ba356 | ||
|
|
7fd4762e2a | ||
|
|
4e4c9ce5af | ||
|
|
7605775a38 | ||
|
|
5a762c678e | ||
|
|
55b1311f78 | ||
|
|
59668698a2 | ||
|
|
22ed687d44 | ||
|
|
1da016cf1a |
@@ -190,7 +190,7 @@ services:
|
|||||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||||
signoz:
|
signoz:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz:v0.112.1
|
image: signoz/signoz:v0.113.0
|
||||||
ports:
|
ports:
|
||||||
- "8080:8080" # signoz port
|
- "8080:8080" # signoz port
|
||||||
# - "6060:6060" # pprof port
|
# - "6060:6060" # pprof port
|
||||||
@@ -213,7 +213,7 @@ services:
|
|||||||
retries: 3
|
retries: 3
|
||||||
otel-collector:
|
otel-collector:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:v0.142.1
|
image: signoz/signoz-otel-collector:v0.144.1
|
||||||
entrypoint:
|
entrypoint:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
command:
|
command:
|
||||||
@@ -241,7 +241,7 @@ services:
|
|||||||
replicas: 3
|
replicas: 3
|
||||||
signoz-telemetrystore-migrator:
|
signoz-telemetrystore-migrator:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
image: signoz/signoz-otel-collector:v0.144.1
|
||||||
environment:
|
environment:
|
||||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||||
|
|||||||
@@ -117,7 +117,7 @@ services:
|
|||||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||||
signoz:
|
signoz:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz:v0.112.1
|
image: signoz/signoz:v0.113.0
|
||||||
ports:
|
ports:
|
||||||
- "8080:8080" # signoz port
|
- "8080:8080" # signoz port
|
||||||
volumes:
|
volumes:
|
||||||
@@ -139,7 +139,7 @@ services:
|
|||||||
retries: 3
|
retries: 3
|
||||||
otel-collector:
|
otel-collector:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:v0.142.1
|
image: signoz/signoz-otel-collector:v0.144.1
|
||||||
entrypoint:
|
entrypoint:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
command:
|
command:
|
||||||
@@ -167,7 +167,7 @@ services:
|
|||||||
replicas: 3
|
replicas: 3
|
||||||
signoz-telemetrystore-migrator:
|
signoz-telemetrystore-migrator:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
image: signoz/signoz-otel-collector:v0.144.1
|
||||||
environment:
|
environment:
|
||||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
|
||||||
|
|||||||
@@ -82,6 +82,12 @@ exporters:
|
|||||||
timeout: 45s
|
timeout: 45s
|
||||||
sending_queue:
|
sending_queue:
|
||||||
enabled: false
|
enabled: false
|
||||||
|
metadataexporter:
|
||||||
|
cache:
|
||||||
|
provider: in_memory
|
||||||
|
dsn: tcp://clickhouse:9000/signoz_metadata
|
||||||
|
enabled: true
|
||||||
|
timeout: 45s
|
||||||
service:
|
service:
|
||||||
telemetry:
|
telemetry:
|
||||||
logs:
|
logs:
|
||||||
@@ -93,19 +99,19 @@ service:
|
|||||||
traces:
|
traces:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
processors: [signozspanmetrics/delta, batch]
|
processors: [signozspanmetrics/delta, batch]
|
||||||
exporters: [clickhousetraces, signozmeter]
|
exporters: [clickhousetraces, metadataexporter, signozmeter]
|
||||||
metrics:
|
metrics:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [signozclickhousemetrics, signozmeter]
|
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
|
||||||
metrics/prometheus:
|
metrics/prometheus:
|
||||||
receivers: [prometheus]
|
receivers: [prometheus]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [signozclickhousemetrics, signozmeter]
|
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
|
||||||
logs:
|
logs:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [clickhouselogsexporter, signozmeter]
|
exporters: [clickhouselogsexporter, metadataexporter, signozmeter]
|
||||||
metrics/meter:
|
metrics/meter:
|
||||||
receivers: [signozmeter]
|
receivers: [signozmeter]
|
||||||
processors: [batch/meter]
|
processors: [batch/meter]
|
||||||
|
|||||||
@@ -181,7 +181,7 @@ services:
|
|||||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||||
signoz:
|
signoz:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz:${VERSION:-v0.112.1}
|
image: signoz/signoz:${VERSION:-v0.113.0}
|
||||||
container_name: signoz
|
container_name: signoz
|
||||||
ports:
|
ports:
|
||||||
- "8080:8080" # signoz port
|
- "8080:8080" # signoz port
|
||||||
@@ -204,7 +204,7 @@ services:
|
|||||||
retries: 3
|
retries: 3
|
||||||
otel-collector:
|
otel-collector:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.1}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.1}
|
||||||
container_name: signoz-otel-collector
|
container_name: signoz-otel-collector
|
||||||
entrypoint:
|
entrypoint:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
@@ -229,7 +229,7 @@ services:
|
|||||||
- "4318:4318" # OTLP HTTP receiver
|
- "4318:4318" # OTLP HTTP receiver
|
||||||
signoz-telemetrystore-migrator:
|
signoz-telemetrystore-migrator:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.1}
|
||||||
container_name: signoz-telemetrystore-migrator
|
container_name: signoz-telemetrystore-migrator
|
||||||
environment:
|
environment:
|
||||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||||
|
|||||||
@@ -109,7 +109,7 @@ services:
|
|||||||
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
|
||||||
signoz:
|
signoz:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz:${VERSION:-v0.112.1}
|
image: signoz/signoz:${VERSION:-v0.113.0}
|
||||||
container_name: signoz
|
container_name: signoz
|
||||||
ports:
|
ports:
|
||||||
- "8080:8080" # signoz port
|
- "8080:8080" # signoz port
|
||||||
@@ -132,7 +132,7 @@ services:
|
|||||||
retries: 3
|
retries: 3
|
||||||
otel-collector:
|
otel-collector:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.1}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.1}
|
||||||
container_name: signoz-otel-collector
|
container_name: signoz-otel-collector
|
||||||
entrypoint:
|
entrypoint:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
@@ -157,7 +157,7 @@ services:
|
|||||||
- "4318:4318" # OTLP HTTP receiver
|
- "4318:4318" # OTLP HTTP receiver
|
||||||
signoz-telemetrystore-migrator:
|
signoz-telemetrystore-migrator:
|
||||||
!!merge <<: *db-depend
|
!!merge <<: *db-depend
|
||||||
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
|
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.144.1}
|
||||||
container_name: signoz-telemetrystore-migrator
|
container_name: signoz-telemetrystore-migrator
|
||||||
environment:
|
environment:
|
||||||
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
|
||||||
|
|||||||
@@ -82,6 +82,12 @@ exporters:
|
|||||||
timeout: 45s
|
timeout: 45s
|
||||||
sending_queue:
|
sending_queue:
|
||||||
enabled: false
|
enabled: false
|
||||||
|
metadataexporter:
|
||||||
|
cache:
|
||||||
|
provider: in_memory
|
||||||
|
dsn: tcp://clickhouse:9000/signoz_metadata
|
||||||
|
enabled: true
|
||||||
|
timeout: 45s
|
||||||
service:
|
service:
|
||||||
telemetry:
|
telemetry:
|
||||||
logs:
|
logs:
|
||||||
@@ -93,19 +99,19 @@ service:
|
|||||||
traces:
|
traces:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
processors: [signozspanmetrics/delta, batch]
|
processors: [signozspanmetrics/delta, batch]
|
||||||
exporters: [clickhousetraces, signozmeter]
|
exporters: [clickhousetraces, metadataexporter, signozmeter]
|
||||||
metrics:
|
metrics:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [signozclickhousemetrics, signozmeter]
|
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
|
||||||
metrics/prometheus:
|
metrics/prometheus:
|
||||||
receivers: [prometheus]
|
receivers: [prometheus]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [signozclickhousemetrics, signozmeter]
|
exporters: [signozclickhousemetrics, metadataexporter, signozmeter]
|
||||||
logs:
|
logs:
|
||||||
receivers: [otlp]
|
receivers: [otlp]
|
||||||
processors: [batch]
|
processors: [batch]
|
||||||
exporters: [clickhouselogsexporter, signozmeter]
|
exporters: [clickhouselogsexporter, metadataexporter, signozmeter]
|
||||||
metrics/meter:
|
metrics/meter:
|
||||||
receivers: [signozmeter]
|
receivers: [signozmeter]
|
||||||
processors: [batch/meter]
|
processors: [batch/meter]
|
||||||
|
|||||||
164
ee/modules/cloudintegrations/implawsprovider/provider.go
Normal file
164
ee/modules/cloudintegrations/implawsprovider/provider.go
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
package implawsprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"slices"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/modules/cloudintegrations"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ cloudintegrations.CloudProvider = (*AWSProvider)(nil)
|
||||||
|
|
||||||
|
type AWSProvider struct {
|
||||||
|
store integrationtypes.Store
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAWSProvider(store integrationtypes.Store) *AWSProvider {
|
||||||
|
return &AWSProvider{store: store}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AWSProvider) AgentCheckIn(ctx context.Context, req *cloudintegrations.PostableAgentCheckInPayload) (any, error) {
|
||||||
|
// if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||||
|
// return nil, apiErr
|
||||||
|
// }
|
||||||
|
|
||||||
|
// existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
|
||||||
|
// if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
|
||||||
|
// return nil, model.BadRequest(fmt.Errorf(
|
||||||
|
// "can't check in with new %s account id %s for account %s with existing %s id %s",
|
||||||
|
// cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
|
||||||
|
// ))
|
||||||
|
// }
|
||||||
|
|
||||||
|
// existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
|
||||||
|
// if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
|
||||||
|
// return nil, model.BadRequest(fmt.Errorf(
|
||||||
|
// "can't check in to %s account %s with id %s. already connected with id %s",
|
||||||
|
// cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
|
||||||
|
// ))
|
||||||
|
// }
|
||||||
|
|
||||||
|
// agentReport := types.AgentReport{
|
||||||
|
// TimestampMillis: time.Now().UnixMilli(),
|
||||||
|
// Data: req.Data,
|
||||||
|
// }
|
||||||
|
|
||||||
|
// account, apiErr := c.accountsRepo.upsert(
|
||||||
|
// ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
|
||||||
|
// )
|
||||||
|
// if apiErr != nil {
|
||||||
|
// return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
||||||
|
// }
|
||||||
|
|
||||||
|
// // prepare and return integration config to be consumed by agent
|
||||||
|
// compiledStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
|
||||||
|
// if err != nil {
|
||||||
|
// return nil, model.InternalError(fmt.Errorf(
|
||||||
|
// "couldn't init telemetry collection strategy: %w", err,
|
||||||
|
// ))
|
||||||
|
// }
|
||||||
|
|
||||||
|
// agentConfig := IntegrationConfigForAgent{
|
||||||
|
// EnabledRegions: []string{},
|
||||||
|
// TelemetryCollectionStrategy: compiledStrategy,
|
||||||
|
// }
|
||||||
|
|
||||||
|
// if account.Config != nil && account.Config.EnabledRegions != nil {
|
||||||
|
// agentConfig.EnabledRegions = account.Config.EnabledRegions
|
||||||
|
// }
|
||||||
|
|
||||||
|
// services, err := services.Map(cloudProvider)
|
||||||
|
// if err != nil {
|
||||||
|
// return nil, err
|
||||||
|
// }
|
||||||
|
|
||||||
|
// svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
|
||||||
|
// ctx, orgId, account.ID.StringValue(),
|
||||||
|
// )
|
||||||
|
// if apiErr != nil {
|
||||||
|
// return nil, model.WrapApiError(
|
||||||
|
// apiErr, "couldn't get service configs for cloud account",
|
||||||
|
// )
|
||||||
|
// }
|
||||||
|
|
||||||
|
// // accumulate config in a fixed order to ensure same config generated across runs
|
||||||
|
// configuredServices := maps.Keys(svcConfigs)
|
||||||
|
// slices.Sort(configuredServices)
|
||||||
|
|
||||||
|
// for _, svcType := range configuredServices {
|
||||||
|
// definition, ok := services[svcType]
|
||||||
|
// if !ok {
|
||||||
|
// continue
|
||||||
|
// }
|
||||||
|
// config := svcConfigs[svcType]
|
||||||
|
|
||||||
|
// err := AddServiceStrategy(svcType, compiledStrategy, definition.Strategy, config)
|
||||||
|
// if err != nil {
|
||||||
|
// return nil, err
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
// return &AgentCheckInResponse{
|
||||||
|
// AccountId: account.ID.StringValue(),
|
||||||
|
// CloudAccountId: *account.AccountID,
|
||||||
|
// RemovedAt: account.RemovedAt,
|
||||||
|
// IntegrationConfig: agentConfig,
|
||||||
|
// }, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AWSProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
|
||||||
|
svcConfigs := make(map[string]*integrationtypes.AWSServiceConfig)
|
||||||
|
if cloudAccountID != nil {
|
||||||
|
activeAccount, err := a.store.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceConfigs, err := a.ServiceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.String())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for svcType, config := range serviceConfigs {
|
||||||
|
serviceConfig := new(integrationtypes.AWSServiceConfig)
|
||||||
|
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
svcConfigs[svcType] = serviceConfig
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
summaries := make([]integrationtypes.AWSServiceSummary, 0)
|
||||||
|
|
||||||
|
definitions, err := a.ServiceDefinitions.ListServiceDefinitions(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, def := range definitions {
|
||||||
|
summary := integrationtypes.AWSServiceSummary{
|
||||||
|
DefinitionMetadata: def.DefinitionMetadata,
|
||||||
|
Config: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
summary.Config = svcConfigs[summary.Id]
|
||||||
|
|
||||||
|
summaries = append(summaries, summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
slices.SortFunc(summaries, func(a, b integrationtypes.AWSServiceSummary) int {
|
||||||
|
if a.DefinitionMetadata.Title < b.DefinitionMetadata.Title {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if a.DefinitionMetadata.Title > b.DefinitionMetadata.Title {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
})
|
||||||
|
|
||||||
|
return &integrationtypes.GettableAWSServices{
|
||||||
|
Services: summaries,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
36
ee/modules/cloudintegrations/implcloudintegrations/module.go
Normal file
36
ee/modules/cloudintegrations/implcloudintegrations/module.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package implcloudintegrations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/modules/cloudintegrations"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
)
|
||||||
|
|
||||||
|
type module struct {
|
||||||
|
store integrationtypes.Store
|
||||||
|
providers map[integrationtypes.CloudProviderType]cloudintegrations.CloudProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewModule(store integrationtypes.Store, providers map[integrationtypes.CloudProviderType]cloudintegrations.CloudProvider) cloudintegrations.Module {
|
||||||
|
return &module{store: store}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *module) ListServices(ctx context.Context, orgID string, cloudProvider string, cloudAccountId *string) (any, error) {
|
||||||
|
|
||||||
|
provider, err := m.getProvider(cloudProvider)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return provider.ListServices(ctx, orgID, cloudAccountId)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *module) getProvider(cloudProvider integrationtypes.CloudProviderType) (cloudintegrations.CloudProvider, error) {
|
||||||
|
provider, ok := m.providers[cloudProvider]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid cloud provider: %s", cloudProvider)
|
||||||
|
}
|
||||||
|
return provider, nil
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -10,7 +11,6 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/global"
|
"github.com/SigNoz/signoz/pkg/global"
|
||||||
"github.com/SigNoz/signoz/pkg/http/middleware"
|
"github.com/SigNoz/signoz/pkg/http/middleware"
|
||||||
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
|
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
|
||||||
@@ -27,12 +27,12 @@ type APIHandlerOptions struct {
|
|||||||
RulesManager *rules.Manager
|
RulesManager *rules.Manager
|
||||||
UsageManager *usage.Manager
|
UsageManager *usage.Manager
|
||||||
IntegrationsController *integrations.Controller
|
IntegrationsController *integrations.Controller
|
||||||
CloudIntegrationsController *cloudintegrations.Controller
|
|
||||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||||
GatewayUrl string
|
GatewayUrl string
|
||||||
// Querier Influx Interval
|
// Querier Influx Interval
|
||||||
FluxInterval time.Duration
|
FluxInterval time.Duration
|
||||||
GlobalConfig global.Config
|
GlobalConfig global.Config
|
||||||
|
Logger *slog.Logger // this is present in Signoz.Instrumentation but adding for quick access
|
||||||
}
|
}
|
||||||
|
|
||||||
type APIHandler struct {
|
type APIHandler struct {
|
||||||
@@ -46,13 +46,13 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz, config signoz.
|
|||||||
Reader: opts.DataConnector,
|
Reader: opts.DataConnector,
|
||||||
RuleManager: opts.RulesManager,
|
RuleManager: opts.RulesManager,
|
||||||
IntegrationsController: opts.IntegrationsController,
|
IntegrationsController: opts.IntegrationsController,
|
||||||
CloudIntegrationsController: opts.CloudIntegrationsController,
|
|
||||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||||
FluxInterval: opts.FluxInterval,
|
FluxInterval: opts.FluxInterval,
|
||||||
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
|
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
|
||||||
LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
|
LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
|
||||||
Signoz: signoz,
|
Signoz: signoz,
|
||||||
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
|
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
|
||||||
|
Logger: opts.Logger,
|
||||||
}, config)
|
}, config)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -101,14 +101,12 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
|
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
|
||||||
|
|
||||||
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
|
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
|
||||||
|
|
||||||
router.HandleFunc(
|
router.HandleFunc(
|
||||||
"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
|
"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
|
||||||
am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
|
am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
|
||||||
).Methods(http.MethodGet)
|
).Methods(http.MethodGet)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -13,20 +14,14 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/errors"
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
"github.com/SigNoz/signoz/pkg/http/render"
|
"github.com/SigNoz/signoz/pkg/http/render"
|
||||||
"github.com/SigNoz/signoz/pkg/modules/user"
|
"github.com/SigNoz/signoz/pkg/modules/user"
|
||||||
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
|
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type CloudIntegrationConnectionParamsResponse struct {
|
// TODO: move this file with other cloud integration related code
|
||||||
IngestionUrl string `json:"ingestion_url,omitempty"`
|
|
||||||
IngestionKey string `json:"ingestion_key,omitempty"`
|
|
||||||
SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
|
|
||||||
SigNozAPIKey string `json:"signoz_api_key,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
|
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
|
||||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
@@ -41,23 +36,21 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
if cloudProvider != "aws" {
|
|
||||||
RespondError(w, basemodel.BadRequest(fmt.Errorf(
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
"cloud provider not supported: %s", cloudProvider,
|
if err != nil {
|
||||||
)), nil)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
|
apiKey, err := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
RespondError(w, basemodel.WrapApiError(
|
render.Error(w, err)
|
||||||
apiErr, "couldn't provision PAT for cloud integration:",
|
|
||||||
), nil)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result := CloudIntegrationConnectionParamsResponse{
|
result := integrationtypes.GettableCloudIntegrationConnectionParams{
|
||||||
SigNozAPIKey: apiKey,
|
SigNozAPIKey: apiKey,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -71,16 +64,17 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
|
|||||||
// Return the API Key (PAT) even if the rest of the params can not be deduced.
|
// Return the API Key (PAT) even if the rest of the params can not be deduced.
|
||||||
// Params not returned from here will be requested from the user via form inputs.
|
// Params not returned from here will be requested from the user via form inputs.
|
||||||
// This enables gracefully degraded but working experience even for non-cloud deployments.
|
// This enables gracefully degraded but working experience even for non-cloud deployments.
|
||||||
zap.L().Info("ingestion params and signoz api url can not be deduced since no license was found")
|
ah.opts.Logger.InfoContext(
|
||||||
ah.Respond(w, result)
|
r.Context(),
|
||||||
|
"ingestion params and signoz api url can not be deduced since no license was found",
|
||||||
|
)
|
||||||
|
render.Success(w, http.StatusOK, result)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
signozApiUrl, apiErr := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
|
signozApiUrl, err := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
RespondError(w, basemodel.WrapApiError(
|
render.Error(w, err)
|
||||||
apiErr, "couldn't deduce ingestion url and signoz api url",
|
|
||||||
), nil)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,48 +83,41 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
|
|||||||
|
|
||||||
gatewayUrl := ah.opts.GatewayUrl
|
gatewayUrl := ah.opts.GatewayUrl
|
||||||
if len(gatewayUrl) > 0 {
|
if len(gatewayUrl) > 0 {
|
||||||
|
ingestionKeyString, err := ah.getOrCreateCloudProviderIngestionKey(
|
||||||
ingestionKey, apiErr := getOrCreateCloudProviderIngestionKey(
|
|
||||||
r.Context(), gatewayUrl, license.Key, cloudProvider,
|
r.Context(), gatewayUrl, license.Key, cloudProvider,
|
||||||
)
|
)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
RespondError(w, basemodel.WrapApiError(
|
render.Error(w, err)
|
||||||
apiErr, "couldn't get or create ingestion key",
|
|
||||||
), nil)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result.IngestionKey = ingestionKey
|
result.IngestionKey = ingestionKeyString
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
zap.L().Info("ingestion key can't be deduced since no gateway url has been configured")
|
ah.opts.Logger.InfoContext(
|
||||||
|
r.Context(),
|
||||||
|
"ingestion key can't be deduced since no gateway url has been configured",
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
ah.Respond(w, result)
|
render.Success(w, http.StatusOK, result)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider string) (
|
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider valuer.String) (string, error) {
|
||||||
string, *basemodel.ApiError,
|
|
||||||
) {
|
|
||||||
integrationPATName := fmt.Sprintf("%s integration", cloudProvider)
|
integrationPATName := fmt.Sprintf("%s integration", cloudProvider)
|
||||||
|
|
||||||
integrationUser, apiErr := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
|
integrationUser, err := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
return "", apiErr
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
orgIdUUID, err := valuer.NewUUID(orgId)
|
orgIdUUID, err := valuer.NewUUID(orgId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", err
|
||||||
"couldn't parse orgId: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
|
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", err
|
||||||
"couldn't list PATs: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
for _, p := range allPats {
|
for _, p := range allPats {
|
||||||
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
|
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
|
||||||
@@ -138,9 +125,10 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
zap.L().Info(
|
ah.opts.Logger.InfoContext(
|
||||||
|
ctx,
|
||||||
"no PAT found for cloud integration, creating a new one",
|
"no PAT found for cloud integration, creating a new one",
|
||||||
zap.String("cloudProvider", cloudProvider),
|
slog.String("cloudProvider", cloudProvider.String()),
|
||||||
)
|
)
|
||||||
|
|
||||||
newPAT, err := types.NewStorableAPIKey(
|
newPAT, err := types.NewStorableAPIKey(
|
||||||
@@ -150,68 +138,48 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
|
|||||||
0,
|
0,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", err
|
||||||
"couldn't create cloud integration PAT: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
|
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", err
|
||||||
"couldn't create cloud integration PAT: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
return newPAT.Token, nil
|
return newPAT.Token, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
|
// TODO: move this function out of handler and use proper module structure
|
||||||
ctx context.Context, orgId string, cloudProvider string,
|
func (ah *APIHandler) getOrCreateCloudIntegrationUser(ctx context.Context, orgId string, cloudProvider valuer.String) (*types.User, error) {
|
||||||
) (*types.User, *basemodel.ApiError) {
|
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider.String())
|
||||||
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
|
|
||||||
email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
|
email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
|
||||||
|
|
||||||
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
|
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
|
password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
|
||||||
|
|
||||||
cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
|
cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return cloudIntegrationUser, nil
|
return cloudIntegrationUser, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
|
// TODO: move this function out of handler and use proper module structure
|
||||||
string, *basemodel.ApiError,
|
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (string, error) {
|
||||||
) {
|
|
||||||
// TODO: remove this struct from here
|
|
||||||
type deploymentResponse struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
ClusterInfo struct {
|
|
||||||
Region struct {
|
|
||||||
DNS string `json:"dns"`
|
|
||||||
} `json:"region"`
|
|
||||||
} `json:"cluster"`
|
|
||||||
}
|
|
||||||
|
|
||||||
respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
|
respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't query for deployment info: error")
|
||||||
"couldn't query for deployment info: error: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := new(deploymentResponse)
|
resp := new(integrationtypes.GettableDeployment)
|
||||||
|
|
||||||
err = json.Unmarshal(respBytes, resp)
|
err = json.Unmarshal(respBytes, resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal deployment info response")
|
||||||
"couldn't unmarshal deployment info response: error: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
regionDns := resp.ClusterInfo.Region.DNS
|
regionDns := resp.ClusterInfo.Region.DNS
|
||||||
@@ -219,9 +187,10 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
|
|||||||
|
|
||||||
if len(regionDns) < 1 || len(deploymentName) < 1 {
|
if len(regionDns) < 1 || len(deploymentName) < 1 {
|
||||||
// Fail early if actual response structure and expectation here ever diverge
|
// Fail early if actual response structure and expectation here ever diverge
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", errors.NewInternalf(
|
||||||
|
errors.CodeInternal,
|
||||||
"deployment info response not in expected shape. couldn't determine region dns and deployment name",
|
"deployment info response not in expected shape. couldn't determine region dns and deployment name",
|
||||||
))
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)
|
signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)
|
||||||
@@ -229,102 +198,85 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
|
|||||||
return signozApiUrl, nil
|
return signozApiUrl, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type ingestionKey struct {
|
func (ah *APIHandler) getOrCreateCloudProviderIngestionKey(
|
||||||
Name string `json:"name"`
|
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider valuer.String,
|
||||||
Value string `json:"value"`
|
) (string, error) {
|
||||||
// other attributes from gateway response not included here since they are not being used.
|
|
||||||
}
|
|
||||||
|
|
||||||
type ingestionKeysSearchResponse struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
Data []ingestionKey `json:"data"`
|
|
||||||
Error string `json:"error"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type createIngestionKeyResponse struct {
|
|
||||||
Status string `json:"status"`
|
|
||||||
Data ingestionKey `json:"data"`
|
|
||||||
Error string `json:"error"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func getOrCreateCloudProviderIngestionKey(
|
|
||||||
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider string,
|
|
||||||
) (string, *basemodel.ApiError) {
|
|
||||||
cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)
|
cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)
|
||||||
|
|
||||||
// see if the key already exists
|
// see if the key already exists
|
||||||
searchResult, apiErr := requestGateway[ingestionKeysSearchResponse](
|
searchResult, err := requestGateway[integrationtypes.GettableIngestionKeysSearch](
|
||||||
ctx,
|
ctx,
|
||||||
gatewayUrl,
|
gatewayUrl,
|
||||||
licenseKey,
|
licenseKey,
|
||||||
fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
|
fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
|
||||||
nil,
|
nil,
|
||||||
|
ah.opts.Logger,
|
||||||
)
|
)
|
||||||
|
if err != nil {
|
||||||
if apiErr != nil {
|
return "", err
|
||||||
return "", basemodel.WrapApiError(
|
|
||||||
apiErr, "couldn't search for cloudprovider ingestion key",
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if searchResult.Status != "success" {
|
if searchResult.Status != "success" {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", errors.NewInternalf(
|
||||||
"couldn't search for cloudprovider ingestion key: status: %s, error: %s",
|
errors.CodeInternal,
|
||||||
|
"couldn't search for cloud provider ingestion key: status: %s, error: %s",
|
||||||
searchResult.Status, searchResult.Error,
|
searchResult.Status, searchResult.Error,
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, k := range searchResult.Data {
|
|
||||||
if k.Name == cloudProviderKeyName {
|
|
||||||
if len(k.Value) < 1 {
|
|
||||||
// Fail early if actual response structure and expectation here ever diverge
|
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
|
||||||
"ingestion keys search response not as expected",
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
return k.Value, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
zap.L().Info(
|
|
||||||
"no existing ingestion key found for cloud integration, creating a new one",
|
|
||||||
zap.String("cloudProvider", cloudProvider),
|
|
||||||
)
|
|
||||||
createKeyResult, apiErr := requestGateway[createIngestionKeyResponse](
|
|
||||||
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
|
|
||||||
map[string]any{
|
|
||||||
"name": cloudProviderKeyName,
|
|
||||||
"tags": []string{"integration", cloudProvider},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
if apiErr != nil {
|
|
||||||
return "", basemodel.WrapApiError(
|
|
||||||
apiErr, "couldn't create cloudprovider ingestion key",
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, k := range searchResult.Data {
|
||||||
|
if k.Name != cloudProviderKeyName {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(k.Value) < 1 {
|
||||||
|
// Fail early if actual response structure and expectation here ever diverge
|
||||||
|
return "", errors.NewInternalf(errors.CodeInternal, "ingestion keys search response not as expected")
|
||||||
|
}
|
||||||
|
|
||||||
|
return k.Value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ah.opts.Logger.InfoContext(
|
||||||
|
ctx,
|
||||||
|
"no existing ingestion key found for cloud integration, creating a new one",
|
||||||
|
slog.String("cloudProvider", cloudProvider.String()),
|
||||||
|
)
|
||||||
|
|
||||||
|
createKeyResult, err := requestGateway[integrationtypes.GettableCreateIngestionKey](
|
||||||
|
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
|
||||||
|
map[string]any{
|
||||||
|
"name": cloudProviderKeyName,
|
||||||
|
"tags": []string{"integration", cloudProvider.String()},
|
||||||
|
},
|
||||||
|
ah.opts.Logger,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
if createKeyResult.Status != "success" {
|
if createKeyResult.Status != "success" {
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", errors.NewInternalf(
|
||||||
"couldn't create cloudprovider ingestion key: status: %s, error: %s",
|
errors.CodeInternal,
|
||||||
|
"couldn't create cloud provider ingestion key: status: %s, error: %s",
|
||||||
createKeyResult.Status, createKeyResult.Error,
|
createKeyResult.Status, createKeyResult.Error,
|
||||||
))
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
ingestionKey := createKeyResult.Data.Value
|
ingestionKeyString := createKeyResult.Data.Value
|
||||||
if len(ingestionKey) < 1 {
|
if len(ingestionKeyString) < 1 {
|
||||||
// Fail early if actual response structure and expectation here ever diverge
|
// Fail early if actual response structure and expectation here ever diverge
|
||||||
return "", basemodel.InternalError(fmt.Errorf(
|
return "", errors.NewInternalf(errors.CodeInternal,
|
||||||
"ingestion key creation response not as expected",
|
"ingestion key creation response not as expected",
|
||||||
))
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
return ingestionKey, nil
|
return ingestionKeyString, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func requestGateway[ResponseType any](
|
func requestGateway[ResponseType any](
|
||||||
ctx context.Context, gatewayUrl string, licenseKey string, path string, payload any,
|
ctx context.Context, gatewayUrl, licenseKey, path string, payload any, logger *slog.Logger,
|
||||||
) (*ResponseType, *basemodel.ApiError) {
|
) (*ResponseType, error) {
|
||||||
|
|
||||||
baseUrl := strings.TrimSuffix(gatewayUrl, "/")
|
baseUrl := strings.TrimSuffix(gatewayUrl, "/")
|
||||||
reqUrl := fmt.Sprintf("%s%s", baseUrl, path)
|
reqUrl := fmt.Sprintf("%s%s", baseUrl, path)
|
||||||
@@ -335,13 +287,12 @@ func requestGateway[ResponseType any](
|
|||||||
"X-Consumer-Groups": "ns:default",
|
"X-Consumer-Groups": "ns:default",
|
||||||
}
|
}
|
||||||
|
|
||||||
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload)
|
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload, logger)
|
||||||
}
|
}
|
||||||
|
|
||||||
func requestAndParseResponse[ResponseType any](
|
func requestAndParseResponse[ResponseType any](
|
||||||
ctx context.Context, url string, headers map[string]string, payload any,
|
ctx context.Context, url string, headers map[string]string, payload any, logger *slog.Logger,
|
||||||
) (*ResponseType, *basemodel.ApiError) {
|
) (*ResponseType, error) {
|
||||||
|
|
||||||
reqMethod := http.MethodGet
|
reqMethod := http.MethodGet
|
||||||
var reqBody io.Reader
|
var reqBody io.Reader
|
||||||
if payload != nil {
|
if payload != nil {
|
||||||
@@ -349,18 +300,14 @@ func requestAndParseResponse[ResponseType any](
|
|||||||
|
|
||||||
bodyJson, err := json.Marshal(payload)
|
bodyJson, err := json.Marshal(payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal payload")
|
||||||
"couldn't serialize request payload to JSON: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
reqBody = bytes.NewBuffer([]byte(bodyJson))
|
reqBody = bytes.NewBuffer(bodyJson)
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
|
req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't create req")
|
||||||
"couldn't prepare request: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range headers {
|
for k, v := range headers {
|
||||||
@@ -373,23 +320,26 @@ func requestAndParseResponse[ResponseType any](
|
|||||||
|
|
||||||
response, err := client.Do(req)
|
response, err := client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't make request: %w", err))
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't make req")
|
||||||
}
|
}
|
||||||
|
|
||||||
defer response.Body.Close()
|
defer func() {
|
||||||
|
err = response.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
logger.ErrorContext(ctx, "couldn't close response body", "error", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
respBody, err := io.ReadAll(response.Body)
|
respBody, err := io.ReadAll(response.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf("couldn't read response: %w", err))
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read response body")
|
||||||
}
|
}
|
||||||
|
|
||||||
var resp ResponseType
|
var resp ResponseType
|
||||||
|
|
||||||
err = json.Unmarshal(respBody, &resp)
|
err = json.Unmarshal(respBody, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, basemodel.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal response body")
|
||||||
"couldn't unmarshal gateway response into %T", resp,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &resp, nil
|
return &resp, nil
|
||||||
|
|||||||
@@ -37,7 +37,6 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||||
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
|
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
|
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
|
||||||
@@ -121,13 +120,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf(
|
|
||||||
"couldn't create cloud provider integrations controller: %w", err,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ingestion pipelines manager
|
// ingestion pipelines manager
|
||||||
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
|
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
|
||||||
signoz.SQLStore,
|
signoz.SQLStore,
|
||||||
@@ -161,11 +153,11 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
|||||||
RulesManager: rm,
|
RulesManager: rm,
|
||||||
UsageManager: usageManager,
|
UsageManager: usageManager,
|
||||||
IntegrationsController: integrationsController,
|
IntegrationsController: integrationsController,
|
||||||
CloudIntegrationsController: cloudIntegrationsController,
|
|
||||||
LogsParsingPipelineController: logParsingPipelineController,
|
LogsParsingPipelineController: logParsingPipelineController,
|
||||||
FluxInterval: config.Querier.FluxInterval,
|
FluxInterval: config.Querier.FluxInterval,
|
||||||
GatewayUrl: config.Gateway.URL.String(),
|
GatewayUrl: config.Gateway.URL.String(),
|
||||||
GlobalConfig: config.Global,
|
GlobalConfig: config.Global,
|
||||||
|
Logger: signoz.Instrumentation.Logger(),
|
||||||
}
|
}
|
||||||
|
|
||||||
apiHandler, err := api.NewAPIHandler(apiOpts, signoz, config)
|
apiHandler, err := api.NewAPIHandler(apiOpts, signoz, config)
|
||||||
|
|||||||
@@ -286,5 +286,6 @@
|
|||||||
"brace-expansion": "^2.0.2",
|
"brace-expansion": "^2.0.2",
|
||||||
"on-headers": "^1.1.0",
|
"on-headers": "^1.1.0",
|
||||||
"tmp": "0.2.4"
|
"tmp": "0.2.4"
|
||||||
}
|
},
|
||||||
|
"packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -86,8 +86,13 @@ function LogDetailInner({
|
|||||||
const handleClickOutside = (e: MouseEvent): void => {
|
const handleClickOutside = (e: MouseEvent): void => {
|
||||||
const target = e.target as HTMLElement;
|
const target = e.target as HTMLElement;
|
||||||
|
|
||||||
// Don't close if clicking on explicitly ignored regions
|
// Don't close if clicking on drawer content, overlays, or portal elements
|
||||||
if (target.closest('[data-log-detail-ignore="true"]')) {
|
if (
|
||||||
|
target.closest('[data-log-detail-ignore="true"]') ||
|
||||||
|
target.closest('.cm-tooltip-autocomplete') ||
|
||||||
|
target.closest('.drawer-popover') ||
|
||||||
|
target.closest('.query-status-popover')
|
||||||
|
) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -400,7 +405,11 @@ function LogDetailInner({
|
|||||||
<div className="log-detail-drawer__content" data-log-detail-ignore="true">
|
<div className="log-detail-drawer__content" data-log-detail-ignore="true">
|
||||||
<div className="log-detail-drawer__log">
|
<div className="log-detail-drawer__log">
|
||||||
<Divider type="vertical" className={cx('log-type-indicator', logType)} />
|
<Divider type="vertical" className={cx('log-type-indicator', logType)} />
|
||||||
<Tooltip title={removeEscapeCharacters(log?.body)} placement="left">
|
<Tooltip
|
||||||
|
title={removeEscapeCharacters(log?.body)}
|
||||||
|
placement="left"
|
||||||
|
mouseLeaveDelay={0}
|
||||||
|
>
|
||||||
<div className="log-body" dangerouslySetInnerHTML={htmlBody} />
|
<div className="log-body" dangerouslySetInnerHTML={htmlBody} />
|
||||||
</Tooltip>
|
</Tooltip>
|
||||||
|
|
||||||
@@ -466,6 +475,7 @@ function LogDetailInner({
|
|||||||
title="Show Filters"
|
title="Show Filters"
|
||||||
placement="topLeft"
|
placement="topLeft"
|
||||||
aria-label="Show Filters"
|
aria-label="Show Filters"
|
||||||
|
mouseLeaveDelay={0}
|
||||||
>
|
>
|
||||||
<Button
|
<Button
|
||||||
className="action-btn"
|
className="action-btn"
|
||||||
@@ -481,6 +491,7 @@ function LogDetailInner({
|
|||||||
aria-label={
|
aria-label={
|
||||||
selectedView === VIEW_TYPES.JSON ? 'Copy JSON' : 'Copy Log Link'
|
selectedView === VIEW_TYPES.JSON ? 'Copy JSON' : 'Copy Log Link'
|
||||||
}
|
}
|
||||||
|
mouseLeaveDelay={0}
|
||||||
>
|
>
|
||||||
<Button
|
<Button
|
||||||
className="action-btn"
|
className="action-btn"
|
||||||
|
|||||||
@@ -27,7 +27,11 @@ function AddToQueryHOC({
|
|||||||
return (
|
return (
|
||||||
// eslint-disable-next-line jsx-a11y/click-events-have-key-events, jsx-a11y/no-static-element-interactions
|
// eslint-disable-next-line jsx-a11y/click-events-have-key-events, jsx-a11y/no-static-element-interactions
|
||||||
<div className={cx('addToQueryContainer', fontSize)} onClick={handleQueryAdd}>
|
<div className={cx('addToQueryContainer', fontSize)} onClick={handleQueryAdd}>
|
||||||
<Popover placement="top" content={popOverContent}>
|
<Popover
|
||||||
|
overlayClassName="drawer-popover"
|
||||||
|
placement="top"
|
||||||
|
content={popOverContent}
|
||||||
|
>
|
||||||
{children}
|
{children}
|
||||||
</Popover>
|
</Popover>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -32,6 +32,7 @@ function CopyClipboardHOC({
|
|||||||
<span onClick={onClick} role="presentation" tabIndex={-1}>
|
<span onClick={onClick} role="presentation" tabIndex={-1}>
|
||||||
<Popover
|
<Popover
|
||||||
placement="top"
|
placement="top"
|
||||||
|
overlayClassName="drawer-popover"
|
||||||
content={<span style={{ fontSize: '0.9rem' }}>{tooltipText}</span>}
|
content={<span style={{ fontSize: '0.9rem' }}>{tooltipText}</span>}
|
||||||
>
|
>
|
||||||
{children}
|
{children}
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ export function getDefaultCellStyle(isDarkMode?: boolean): CSSProperties {
|
|||||||
|
|
||||||
export const defaultTableStyle: CSSProperties = {
|
export const defaultTableStyle: CSSProperties = {
|
||||||
minWidth: '40rem',
|
minWidth: '40rem',
|
||||||
maxWidth: '60rem',
|
maxWidth: '90rem',
|
||||||
};
|
};
|
||||||
|
|
||||||
export const defaultListViewPanelStyle: CSSProperties = {
|
export const defaultListViewPanelStyle: CSSProperties = {
|
||||||
|
|||||||
@@ -1328,7 +1328,10 @@ function QuerySearch({
|
|||||||
)}
|
)}
|
||||||
|
|
||||||
<div className="query-where-clause-editor-container">
|
<div className="query-where-clause-editor-container">
|
||||||
<Tooltip title={getTooltipContent()} placement="left">
|
<Tooltip
|
||||||
|
title={<div data-log-detail-ignore="true">{getTooltipContent()}</div>}
|
||||||
|
placement="left"
|
||||||
|
>
|
||||||
<a
|
<a
|
||||||
href="https://signoz.io/docs/userguide/search-syntax/"
|
href="https://signoz.io/docs/userguide/search-syntax/"
|
||||||
target="_blank"
|
target="_blank"
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import {
|
|||||||
import useVariablesFromUrl from 'hooks/dashboard/useVariablesFromUrl';
|
import useVariablesFromUrl from 'hooks/dashboard/useVariablesFromUrl';
|
||||||
import { useDashboard } from 'providers/Dashboard/Dashboard';
|
import { useDashboard } from 'providers/Dashboard/Dashboard';
|
||||||
import { initializeDefaultVariables } from 'providers/Dashboard/initializeDefaultVariables';
|
import { initializeDefaultVariables } from 'providers/Dashboard/initializeDefaultVariables';
|
||||||
|
import { updateDashboardVariablesStore } from 'providers/Dashboard/store/dashboardVariables/dashboardVariablesStore';
|
||||||
import {
|
import {
|
||||||
enqueueDescendantsOfVariable,
|
enqueueDescendantsOfVariable,
|
||||||
enqueueFetchOfAllVariables,
|
enqueueFetchOfAllVariables,
|
||||||
@@ -31,6 +32,9 @@ function DashboardVariableSelection(): JSX.Element | null {
|
|||||||
const { updateUrlVariable, getUrlVariables } = useVariablesFromUrl();
|
const { updateUrlVariable, getUrlVariables } = useVariablesFromUrl();
|
||||||
|
|
||||||
const { dashboardVariables } = useDashboardVariables();
|
const { dashboardVariables } = useDashboardVariables();
|
||||||
|
const dashboardId = useDashboardVariablesSelector(
|
||||||
|
(state) => state.dashboardId,
|
||||||
|
);
|
||||||
const sortedVariablesArray = useDashboardVariablesSelector(
|
const sortedVariablesArray = useDashboardVariablesSelector(
|
||||||
(state) => state.sortedVariablesArray,
|
(state) => state.sortedVariablesArray,
|
||||||
);
|
);
|
||||||
@@ -96,6 +100,28 @@ function DashboardVariableSelection(): JSX.Element | null {
|
|||||||
updateUrlVariable(name || id, value);
|
updateUrlVariable(name || id, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Synchronously update the external store with the new variable value so that
|
||||||
|
// child variables see the updated parent value when they refetch, rather than
|
||||||
|
// waiting for setSelectedDashboard → useEffect → updateDashboardVariablesStore.
|
||||||
|
const updatedVariables = { ...dashboardVariables };
|
||||||
|
if (updatedVariables[id]) {
|
||||||
|
updatedVariables[id] = {
|
||||||
|
...updatedVariables[id],
|
||||||
|
selectedValue: value,
|
||||||
|
allSelected,
|
||||||
|
haveCustomValuesSelected,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
if (updatedVariables[name]) {
|
||||||
|
updatedVariables[name] = {
|
||||||
|
...updatedVariables[name],
|
||||||
|
selectedValue: value,
|
||||||
|
allSelected,
|
||||||
|
haveCustomValuesSelected,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
updateDashboardVariablesStore({ dashboardId, variables: updatedVariables });
|
||||||
|
|
||||||
setSelectedDashboard((prev) => {
|
setSelectedDashboard((prev) => {
|
||||||
if (prev) {
|
if (prev) {
|
||||||
const oldVariables = { ...prev?.data.variables };
|
const oldVariables = { ...prev?.data.variables };
|
||||||
@@ -130,10 +156,12 @@ function DashboardVariableSelection(): JSX.Element | null {
|
|||||||
return prev;
|
return prev;
|
||||||
});
|
});
|
||||||
|
|
||||||
// Cascade: enqueue query-type descendants for refetching
|
// Cascade: enqueue query-type descendants for refetching.
|
||||||
|
// Safe to call synchronously now that the store already has the updated value.
|
||||||
enqueueDescendantsOfVariable(name);
|
enqueueDescendantsOfVariable(name);
|
||||||
},
|
},
|
||||||
[
|
[
|
||||||
|
dashboardId,
|
||||||
dashboardVariables,
|
dashboardVariables,
|
||||||
updateLocalStorageDashboardVariables,
|
updateLocalStorageDashboardVariables,
|
||||||
updateUrlVariable,
|
updateUrlVariable,
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import dashboardVariablesQuery from 'api/dashboard/variables/dashboardVariablesQ
|
|||||||
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
|
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
|
||||||
import { useVariableFetchState } from 'hooks/dashboard/useVariableFetchState';
|
import { useVariableFetchState } from 'hooks/dashboard/useVariableFetchState';
|
||||||
import sortValues from 'lib/dashboardVariables/sortVariableValues';
|
import sortValues from 'lib/dashboardVariables/sortVariableValues';
|
||||||
import { isArray, isEmpty, isString } from 'lodash-es';
|
import { isArray, isEmpty } from 'lodash-es';
|
||||||
import { AppState } from 'store/reducers';
|
import { AppState } from 'store/reducers';
|
||||||
import { VariableResponseProps } from 'types/api/dashboard/variables/query';
|
import { VariableResponseProps } from 'types/api/dashboard/variables/query';
|
||||||
import { GlobalReducer } from 'types/reducer/globalTime';
|
import { GlobalReducer } from 'types/reducer/globalTime';
|
||||||
@@ -54,7 +54,7 @@ function QueryVariableInput({
|
|||||||
onChange,
|
onChange,
|
||||||
onDropdownVisibleChange,
|
onDropdownVisibleChange,
|
||||||
handleClear,
|
handleClear,
|
||||||
applyDefaultIfNeeded,
|
getDefaultValue,
|
||||||
} = useDashboardVariableSelectHelper({
|
} = useDashboardVariableSelectHelper({
|
||||||
variableData,
|
variableData,
|
||||||
optionsData,
|
optionsData,
|
||||||
@@ -68,81 +68,93 @@ function QueryVariableInput({
|
|||||||
try {
|
try {
|
||||||
setErrorMessage(null);
|
setErrorMessage(null);
|
||||||
|
|
||||||
|
// This is just a check given the previously undefined typed name prop. Not significant
|
||||||
|
// This will be changed when we change the schema
|
||||||
|
// TODO: @AshwinBhatkal Perses
|
||||||
|
if (!variableData.name) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// if the response is not an array, premature return
|
||||||
if (
|
if (
|
||||||
variablesRes?.variableValues &&
|
!variablesRes?.variableValues ||
|
||||||
Array.isArray(variablesRes?.variableValues)
|
!Array.isArray(variablesRes?.variableValues)
|
||||||
) {
|
) {
|
||||||
const newOptionsData = sortValues(
|
return;
|
||||||
variablesRes?.variableValues,
|
}
|
||||||
variableData.sort,
|
|
||||||
|
const sortedNewOptions = sortValues(
|
||||||
|
variablesRes.variableValues,
|
||||||
|
variableData.sort,
|
||||||
|
);
|
||||||
|
const sortedOldOptions = sortValues(optionsData, variableData.sort);
|
||||||
|
|
||||||
|
// if options are the same as before, no need to update state or check for selected value validity
|
||||||
|
// ! selectedValue needs to be set in the first pass though, as options are initially empty array and we need to apply default if needed
|
||||||
|
// Expecatation is that when oldOptions are not empty, then there is always some selectedValue
|
||||||
|
if (areArraysEqual(sortedNewOptions, sortedOldOptions)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
setOptionsData(sortedNewOptions);
|
||||||
|
|
||||||
|
let isSelectedValueMissingInNewOptions = false;
|
||||||
|
|
||||||
|
// Check if currently selected value(s) are present in the new options list
|
||||||
|
if (isArray(variableData.selectedValue)) {
|
||||||
|
isSelectedValueMissingInNewOptions = variableData.selectedValue.some(
|
||||||
|
(val) => !sortedNewOptions.includes(val),
|
||||||
);
|
);
|
||||||
|
} else if (
|
||||||
|
variableData.selectedValue &&
|
||||||
|
!sortedNewOptions.includes(variableData.selectedValue)
|
||||||
|
) {
|
||||||
|
isSelectedValueMissingInNewOptions = true;
|
||||||
|
}
|
||||||
|
|
||||||
const oldOptionsData = sortValues(optionsData, variableData.sort) as never;
|
// If multi-select with ALL option enabled, and ALL is currently selected, we want to maintain that state and select all new options
|
||||||
|
// This block does not depend on selected value because of ALL and also because we would only come here if options are different from the previous
|
||||||
|
if (
|
||||||
|
variableData.multiSelect &&
|
||||||
|
variableData.showALLOption &&
|
||||||
|
variableData.allSelected &&
|
||||||
|
isSelectedValueMissingInNewOptions
|
||||||
|
) {
|
||||||
|
onValueUpdate(variableData.name, variableData.id, sortedNewOptions, true);
|
||||||
|
|
||||||
if (!areArraysEqual(newOptionsData, oldOptionsData)) {
|
// Update tempSelection to maintain ALL state when dropdown is open
|
||||||
let valueNotInList = false;
|
if (tempSelection !== undefined) {
|
||||||
|
setTempSelection(sortedNewOptions.map((option) => option.toString()));
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if (isArray(variableData.selectedValue)) {
|
const value = variableData.selectedValue;
|
||||||
variableData.selectedValue.forEach((val) => {
|
let allSelected = false;
|
||||||
if (!newOptionsData.includes(val)) {
|
|
||||||
valueNotInList = true;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
} else if (
|
|
||||||
isString(variableData.selectedValue) &&
|
|
||||||
!newOptionsData.includes(variableData.selectedValue)
|
|
||||||
) {
|
|
||||||
valueNotInList = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (variableData.name && (valueNotInList || variableData.allSelected)) {
|
if (variableData.multiSelect) {
|
||||||
if (
|
const { selectedValue } = variableData;
|
||||||
variableData.allSelected &&
|
allSelected =
|
||||||
variableData.multiSelect &&
|
sortedNewOptions.length > 0 &&
|
||||||
variableData.showALLOption
|
Array.isArray(selectedValue) &&
|
||||||
) {
|
sortedNewOptions.every((option) => selectedValue.includes(option));
|
||||||
if (
|
}
|
||||||
variableData.name &&
|
|
||||||
variableData.id &&
|
|
||||||
!isEmpty(variableData.selectedValue)
|
|
||||||
) {
|
|
||||||
onValueUpdate(
|
|
||||||
variableData.name,
|
|
||||||
variableData.id,
|
|
||||||
newOptionsData,
|
|
||||||
true,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update tempSelection to maintain ALL state when dropdown is open
|
if (
|
||||||
if (tempSelection !== undefined) {
|
variableData.name &&
|
||||||
setTempSelection(newOptionsData.map((option) => option.toString()));
|
variableData.id &&
|
||||||
}
|
!isEmpty(variableData.selectedValue)
|
||||||
} else {
|
) {
|
||||||
const value = variableData.selectedValue;
|
onValueUpdate(variableData.name, variableData.id, value, allSelected);
|
||||||
let allSelected = false;
|
} else {
|
||||||
|
const defaultValue = getDefaultValue(sortedNewOptions);
|
||||||
if (variableData.multiSelect) {
|
if (defaultValue !== undefined) {
|
||||||
const { selectedValue } = variableData;
|
onValueUpdate(
|
||||||
allSelected =
|
variableData.name,
|
||||||
newOptionsData.length > 0 &&
|
variableData.id,
|
||||||
Array.isArray(selectedValue) &&
|
defaultValue,
|
||||||
newOptionsData.every((option) => selectedValue.includes(option));
|
allSelected,
|
||||||
}
|
);
|
||||||
|
|
||||||
if (
|
|
||||||
variableData.name &&
|
|
||||||
variableData.id &&
|
|
||||||
!isEmpty(variableData.selectedValue)
|
|
||||||
) {
|
|
||||||
onValueUpdate(variableData.name, variableData.id, value, allSelected);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
setOptionsData(newOptionsData);
|
|
||||||
// Apply default if no value is selected (e.g., new variable, first load)
|
|
||||||
applyDefaultIfNeeded(newOptionsData);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
@@ -155,7 +167,7 @@ function QueryVariableInput({
|
|||||||
onValueUpdate,
|
onValueUpdate,
|
||||||
tempSelection,
|
tempSelection,
|
||||||
setTempSelection,
|
setTempSelection,
|
||||||
applyDefaultIfNeeded,
|
getDefaultValue,
|
||||||
],
|
],
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
/* eslint-disable sonarjs/no-duplicate-string */
|
/* eslint-disable sonarjs/no-duplicate-string */
|
||||||
import { act, render } from '@testing-library/react';
|
import { act, render } from '@testing-library/react';
|
||||||
|
import * as dashboardVariablesStoreModule from 'providers/Dashboard/store/dashboardVariables/dashboardVariablesStore';
|
||||||
import {
|
import {
|
||||||
dashboardVariablesStore,
|
dashboardVariablesStore,
|
||||||
setDashboardVariablesStore,
|
setDashboardVariablesStore,
|
||||||
@@ -10,6 +11,7 @@ import {
|
|||||||
IDashboardVariablesStoreState,
|
IDashboardVariablesStoreState,
|
||||||
} from 'providers/Dashboard/store/dashboardVariables/dashboardVariablesStoreTypes';
|
} from 'providers/Dashboard/store/dashboardVariables/dashboardVariablesStoreTypes';
|
||||||
import {
|
import {
|
||||||
|
enqueueDescendantsOfVariable,
|
||||||
enqueueFetchOfAllVariables,
|
enqueueFetchOfAllVariables,
|
||||||
initializeVariableFetchStore,
|
initializeVariableFetchStore,
|
||||||
} from 'providers/Dashboard/store/variableFetchStore';
|
} from 'providers/Dashboard/store/variableFetchStore';
|
||||||
@@ -17,6 +19,17 @@ import { IDashboardVariable } from 'types/api/dashboard/getAll';
|
|||||||
|
|
||||||
import DashboardVariableSelection from '../DashboardVariableSelection';
|
import DashboardVariableSelection from '../DashboardVariableSelection';
|
||||||
|
|
||||||
|
// Mutable container to capture the onValueUpdate callback from VariableItem
|
||||||
|
const mockVariableItemCallbacks: {
|
||||||
|
onValueUpdate?: (
|
||||||
|
name: string,
|
||||||
|
id: string,
|
||||||
|
value: IDashboardVariable['selectedValue'],
|
||||||
|
allSelected: boolean,
|
||||||
|
haveCustomValuesSelected?: boolean,
|
||||||
|
) => void;
|
||||||
|
} = {};
|
||||||
|
|
||||||
// Mock providers/Dashboard/Dashboard
|
// Mock providers/Dashboard/Dashboard
|
||||||
const mockSetSelectedDashboard = jest.fn();
|
const mockSetSelectedDashboard = jest.fn();
|
||||||
const mockUpdateLocalStorageDashboardVariables = jest.fn();
|
const mockUpdateLocalStorageDashboardVariables = jest.fn();
|
||||||
@@ -56,10 +69,14 @@ jest.mock('react-redux', () => ({
|
|||||||
useSelector: jest.fn().mockReturnValue({ minTime: 1000, maxTime: 2000 }),
|
useSelector: jest.fn().mockReturnValue({ minTime: 1000, maxTime: 2000 }),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Mock VariableItem to avoid rendering complexity
|
// VariableItem mock captures the onValueUpdate prop for use in onValueUpdate tests
|
||||||
jest.mock('../VariableItem', () => ({
|
jest.mock('../VariableItem', () => ({
|
||||||
__esModule: true,
|
__esModule: true,
|
||||||
default: (): JSX.Element => <div data-testid="variable-item" />,
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||||
|
default: (props: any): JSX.Element => {
|
||||||
|
mockVariableItemCallbacks.onValueUpdate = props.onValueUpdate;
|
||||||
|
return <div data-testid="variable-item" />;
|
||||||
|
},
|
||||||
}));
|
}));
|
||||||
|
|
||||||
function createVariable(
|
function createVariable(
|
||||||
@@ -200,4 +217,162 @@ describe('DashboardVariableSelection', () => {
|
|||||||
expect(initializeVariableFetchStore).not.toHaveBeenCalled();
|
expect(initializeVariableFetchStore).not.toHaveBeenCalled();
|
||||||
expect(enqueueFetchOfAllVariables).not.toHaveBeenCalled();
|
expect(enqueueFetchOfAllVariables).not.toHaveBeenCalled();
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe('onValueUpdate', () => {
|
||||||
|
let updateStoreSpy: jest.SpyInstance;
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
resetStore();
|
||||||
|
jest.clearAllMocks();
|
||||||
|
// Real implementation pass-through — we just want to observe calls
|
||||||
|
updateStoreSpy = jest.spyOn(
|
||||||
|
dashboardVariablesStoreModule,
|
||||||
|
'updateDashboardVariablesStore',
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
updateStoreSpy.mockRestore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('updates dashboardVariablesStore synchronously before enqueueDescendantsOfVariable', () => {
|
||||||
|
setDashboardVariablesStore({
|
||||||
|
dashboardId: 'dash-1',
|
||||||
|
variables: {
|
||||||
|
env: createVariable({ name: 'env', id: 'env-id', order: 0 }),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
render(<DashboardVariableSelection />);
|
||||||
|
|
||||||
|
const callOrder: string[] = [];
|
||||||
|
updateStoreSpy.mockImplementation(() => {
|
||||||
|
callOrder.push('updateDashboardVariablesStore');
|
||||||
|
});
|
||||||
|
(enqueueDescendantsOfVariable as jest.Mock).mockImplementation(() => {
|
||||||
|
callOrder.push('enqueueDescendantsOfVariable');
|
||||||
|
});
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
mockVariableItemCallbacks.onValueUpdate?.(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
'production',
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(callOrder).toEqual([
|
||||||
|
'updateDashboardVariablesStore',
|
||||||
|
'enqueueDescendantsOfVariable',
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('passes updated variable value to dashboardVariablesStore', () => {
|
||||||
|
setDashboardVariablesStore({
|
||||||
|
dashboardId: 'dash-1',
|
||||||
|
variables: {
|
||||||
|
env: createVariable({
|
||||||
|
name: 'env',
|
||||||
|
id: 'env-id',
|
||||||
|
order: 0,
|
||||||
|
selectedValue: 'staging',
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
render(<DashboardVariableSelection />);
|
||||||
|
|
||||||
|
// Clear spy calls that happened during setup/render
|
||||||
|
updateStoreSpy.mockClear();
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
mockVariableItemCallbacks.onValueUpdate?.(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
'production',
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(updateStoreSpy).toHaveBeenCalledWith(
|
||||||
|
expect.objectContaining({
|
||||||
|
dashboardId: 'dash-1',
|
||||||
|
variables: expect.objectContaining({
|
||||||
|
env: expect.objectContaining({
|
||||||
|
selectedValue: 'production',
|
||||||
|
allSelected: false,
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('calls enqueueDescendantsOfVariable synchronously without a timer', () => {
|
||||||
|
jest.useFakeTimers();
|
||||||
|
|
||||||
|
setDashboardVariablesStore({
|
||||||
|
dashboardId: 'dash-1',
|
||||||
|
variables: {
|
||||||
|
env: createVariable({ name: 'env', id: 'env-id', order: 0 }),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
render(<DashboardVariableSelection />);
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
mockVariableItemCallbacks.onValueUpdate?.(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
'production',
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Must be called immediately — no timer advancement needed
|
||||||
|
expect(enqueueDescendantsOfVariable).toHaveBeenCalledWith('env');
|
||||||
|
|
||||||
|
jest.useRealTimers();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('propagates allSelected and haveCustomValuesSelected to the store', () => {
|
||||||
|
setDashboardVariablesStore({
|
||||||
|
dashboardId: 'dash-1',
|
||||||
|
variables: {
|
||||||
|
env: createVariable({
|
||||||
|
name: 'env',
|
||||||
|
id: 'env-id',
|
||||||
|
order: 0,
|
||||||
|
multiSelect: true,
|
||||||
|
showALLOption: true,
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
render(<DashboardVariableSelection />);
|
||||||
|
updateStoreSpy.mockClear();
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
mockVariableItemCallbacks.onValueUpdate?.(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
['production', 'staging'],
|
||||||
|
true,
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(updateStoreSpy).toHaveBeenCalledWith(
|
||||||
|
expect.objectContaining({
|
||||||
|
variables: expect.objectContaining({
|
||||||
|
env: expect.objectContaining({
|
||||||
|
selectedValue: ['production', 'staging'],
|
||||||
|
allSelected: true,
|
||||||
|
haveCustomValuesSelected: false,
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -0,0 +1,275 @@
|
|||||||
|
/* eslint-disable sonarjs/no-duplicate-string */
|
||||||
|
import { QueryClient, QueryClientProvider } from 'react-query';
|
||||||
|
import { act, render, waitFor } from '@testing-library/react';
|
||||||
|
import dashboardVariablesQuery from 'api/dashboard/variables/dashboardVariablesQuery';
|
||||||
|
import { variableFetchStore } from 'providers/Dashboard/store/variableFetchStore';
|
||||||
|
import { IDashboardVariable } from 'types/api/dashboard/getAll';
|
||||||
|
|
||||||
|
import QueryVariableInput from '../QueryVariableInput';
|
||||||
|
|
||||||
|
jest.mock('api/dashboard/variables/dashboardVariablesQuery');
|
||||||
|
|
||||||
|
jest.mock('react-redux', () => ({
|
||||||
|
...jest.requireActual('react-redux'),
|
||||||
|
useSelector: jest.fn().mockReturnValue({ minTime: 1000, maxTime: 2000 }),
|
||||||
|
}));
|
||||||
|
|
||||||
|
function createTestQueryClient(): QueryClient {
|
||||||
|
return new QueryClient({
|
||||||
|
defaultOptions: {
|
||||||
|
queries: { retry: false, refetchOnWindowFocus: false },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function Wrapper({
|
||||||
|
children,
|
||||||
|
queryClient,
|
||||||
|
}: {
|
||||||
|
children: React.ReactNode;
|
||||||
|
queryClient: QueryClient;
|
||||||
|
}): JSX.Element {
|
||||||
|
return (
|
||||||
|
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function createVariable(
|
||||||
|
overrides: Partial<IDashboardVariable> = {},
|
||||||
|
): IDashboardVariable {
|
||||||
|
return {
|
||||||
|
id: 'env-id',
|
||||||
|
name: 'env',
|
||||||
|
description: '',
|
||||||
|
type: 'QUERY',
|
||||||
|
sort: 'DISABLED',
|
||||||
|
showALLOption: false,
|
||||||
|
multiSelect: false,
|
||||||
|
order: 0,
|
||||||
|
queryValue: 'SELECT env FROM table',
|
||||||
|
...overrides,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Put the named variable into 'loading' state so useQuery fires on mount */
|
||||||
|
function setVariableLoading(name: string): void {
|
||||||
|
variableFetchStore.update((draft) => {
|
||||||
|
draft.states[name] = 'loading';
|
||||||
|
draft.cycleIds[name] = (draft.cycleIds[name] || 0) + 1;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function resetFetchStore(): void {
|
||||||
|
variableFetchStore.set(() => ({
|
||||||
|
states: {},
|
||||||
|
lastUpdated: {},
|
||||||
|
cycleIds: {},
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
describe('QueryVariableInput - getOptions logic', () => {
|
||||||
|
const mockOnValueUpdate = jest.fn();
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
jest.clearAllMocks();
|
||||||
|
resetFetchStore();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
resetFetchStore();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('applies default value (first option) when selectedValue is empty on first load', async () => {
|
||||||
|
(dashboardVariablesQuery as jest.Mock).mockResolvedValue({
|
||||||
|
statusCode: 200,
|
||||||
|
payload: { variableValues: ['production', 'staging', 'dev'] },
|
||||||
|
});
|
||||||
|
|
||||||
|
const variable = createVariable({ selectedValue: undefined });
|
||||||
|
setVariableLoading('env');
|
||||||
|
|
||||||
|
const queryClient = createTestQueryClient();
|
||||||
|
render(
|
||||||
|
<Wrapper queryClient={queryClient}>
|
||||||
|
<QueryVariableInput
|
||||||
|
variableData={variable}
|
||||||
|
existingVariables={{ 'env-id': variable }}
|
||||||
|
onValueUpdate={mockOnValueUpdate}
|
||||||
|
/>
|
||||||
|
</Wrapper>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockOnValueUpdate).toHaveBeenCalledWith(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
'production', // first option by default
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('keeps existing selectedValue when it is present in new options', async () => {
|
||||||
|
(dashboardVariablesQuery as jest.Mock).mockResolvedValue({
|
||||||
|
statusCode: 200,
|
||||||
|
payload: { variableValues: ['production', 'staging'] },
|
||||||
|
});
|
||||||
|
|
||||||
|
const variable = createVariable({ selectedValue: 'staging' });
|
||||||
|
setVariableLoading('env');
|
||||||
|
|
||||||
|
const queryClient = createTestQueryClient();
|
||||||
|
render(
|
||||||
|
<Wrapper queryClient={queryClient}>
|
||||||
|
<QueryVariableInput
|
||||||
|
variableData={variable}
|
||||||
|
existingVariables={{ 'env-id': variable }}
|
||||||
|
onValueUpdate={mockOnValueUpdate}
|
||||||
|
/>
|
||||||
|
</Wrapper>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockOnValueUpdate).toHaveBeenCalledWith(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
'staging',
|
||||||
|
false,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('selects all new options when allSelected=true and value is missing from new options', async () => {
|
||||||
|
(dashboardVariablesQuery as jest.Mock).mockResolvedValue({
|
||||||
|
statusCode: 200,
|
||||||
|
payload: { variableValues: ['production', 'staging'] },
|
||||||
|
});
|
||||||
|
|
||||||
|
const variable = createVariable({
|
||||||
|
selectedValue: ['old-env'],
|
||||||
|
allSelected: true,
|
||||||
|
multiSelect: true,
|
||||||
|
showALLOption: true,
|
||||||
|
});
|
||||||
|
setVariableLoading('env');
|
||||||
|
|
||||||
|
const queryClient = createTestQueryClient();
|
||||||
|
render(
|
||||||
|
<Wrapper queryClient={queryClient}>
|
||||||
|
<QueryVariableInput
|
||||||
|
variableData={variable}
|
||||||
|
existingVariables={{ 'env-id': variable }}
|
||||||
|
onValueUpdate={mockOnValueUpdate}
|
||||||
|
/>
|
||||||
|
</Wrapper>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockOnValueUpdate).toHaveBeenCalledWith(
|
||||||
|
'env',
|
||||||
|
'env-id',
|
||||||
|
['production', 'staging'],
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('does not call onValueUpdate a second time when options have not changed', async () => {
|
||||||
|
const mockQueryFn = jest.fn().mockResolvedValue({
|
||||||
|
statusCode: 200,
|
||||||
|
payload: { variableValues: ['production', 'staging'] },
|
||||||
|
});
|
||||||
|
(dashboardVariablesQuery as jest.Mock).mockImplementation(mockQueryFn);
|
||||||
|
|
||||||
|
const variable = createVariable({ selectedValue: 'production' });
|
||||||
|
setVariableLoading('env');
|
||||||
|
|
||||||
|
const queryClient = createTestQueryClient();
|
||||||
|
render(
|
||||||
|
<Wrapper queryClient={queryClient}>
|
||||||
|
<QueryVariableInput
|
||||||
|
variableData={variable}
|
||||||
|
existingVariables={{ 'env-id': variable }}
|
||||||
|
onValueUpdate={mockOnValueUpdate}
|
||||||
|
/>
|
||||||
|
</Wrapper>,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Wait for first fetch and onValueUpdate call
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockOnValueUpdate).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
mockOnValueUpdate.mockClear();
|
||||||
|
|
||||||
|
// Trigger a second fetch cycle with the same API response
|
||||||
|
act(() => {
|
||||||
|
variableFetchStore.update((draft) => {
|
||||||
|
draft.states['env'] = 'revalidating';
|
||||||
|
draft.cycleIds['env'] = (draft.cycleIds['env'] || 0) + 1;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for second query to fire
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockQueryFn).toHaveBeenCalledTimes(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Options are unchanged, so onValueUpdate must not fire again
|
||||||
|
expect(mockOnValueUpdate).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('does not call onValueUpdate when API returns a non-array response', async () => {
|
||||||
|
(dashboardVariablesQuery as jest.Mock).mockResolvedValue({
|
||||||
|
statusCode: 200,
|
||||||
|
payload: { variableValues: null },
|
||||||
|
});
|
||||||
|
|
||||||
|
const variable = createVariable({ selectedValue: 'production' });
|
||||||
|
setVariableLoading('env');
|
||||||
|
|
||||||
|
const queryClient = createTestQueryClient();
|
||||||
|
render(
|
||||||
|
<Wrapper queryClient={queryClient}>
|
||||||
|
<QueryVariableInput
|
||||||
|
variableData={variable}
|
||||||
|
existingVariables={{ 'env-id': variable }}
|
||||||
|
onValueUpdate={mockOnValueUpdate}
|
||||||
|
/>
|
||||||
|
</Wrapper>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(dashboardVariablesQuery).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockOnValueUpdate).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('does not fire the query when variableData.name is empty', () => {
|
||||||
|
(dashboardVariablesQuery as jest.Mock).mockResolvedValue({
|
||||||
|
statusCode: 200,
|
||||||
|
payload: { variableValues: ['production'] },
|
||||||
|
});
|
||||||
|
|
||||||
|
// Variable with no name — useVariableFetchState will be called with ''
|
||||||
|
// and the query key will have an empty name, leaving it disabled
|
||||||
|
const variable = createVariable({ name: '' });
|
||||||
|
// Note: we do NOT put it in 'loading' state since name is empty
|
||||||
|
// (no variableFetchStore entry for '' means isVariableFetching=false)
|
||||||
|
|
||||||
|
const queryClient = createTestQueryClient();
|
||||||
|
render(
|
||||||
|
<Wrapper queryClient={queryClient}>
|
||||||
|
<QueryVariableInput
|
||||||
|
variableData={variable}
|
||||||
|
existingVariables={{ 'env-id': variable }}
|
||||||
|
onValueUpdate={mockOnValueUpdate}
|
||||||
|
/>
|
||||||
|
</Wrapper>,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(dashboardVariablesQuery).not.toHaveBeenCalled();
|
||||||
|
expect(mockOnValueUpdate).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
});
|
||||||
@@ -46,6 +46,9 @@ interface UseDashboardVariableSelectHelperReturn {
|
|||||||
applyDefaultIfNeeded: (
|
applyDefaultIfNeeded: (
|
||||||
overrideOptions?: (string | number | boolean)[],
|
overrideOptions?: (string | number | boolean)[],
|
||||||
) => void;
|
) => void;
|
||||||
|
getDefaultValue: (
|
||||||
|
overrideOptions?: (string | number | boolean)[],
|
||||||
|
) => string | string[] | undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
// eslint-disable-next-line sonarjs/cognitive-complexity
|
// eslint-disable-next-line sonarjs/cognitive-complexity
|
||||||
@@ -248,5 +251,6 @@ export function useDashboardVariableSelectHelper({
|
|||||||
defaultValue,
|
defaultValue,
|
||||||
onChange,
|
onChange,
|
||||||
applyDefaultIfNeeded,
|
applyDefaultIfNeeded,
|
||||||
|
getDefaultValue,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -121,9 +121,23 @@ function BodyTitleRenderer({
|
|||||||
return (
|
return (
|
||||||
<TitleWrapper onClick={handleNodeClick}>
|
<TitleWrapper onClick={handleNodeClick}>
|
||||||
{typeof value !== 'object' && (
|
{typeof value !== 'object' && (
|
||||||
<Dropdown menu={menu} trigger={['click']}>
|
<span
|
||||||
<SettingOutlined style={{ marginRight: 8 }} className="hover-reveal" />
|
onClick={(e): void => {
|
||||||
</Dropdown>
|
e.stopPropagation();
|
||||||
|
e.preventDefault();
|
||||||
|
}}
|
||||||
|
onMouseDown={(e): void => e.preventDefault()}
|
||||||
|
>
|
||||||
|
<Dropdown
|
||||||
|
menu={menu}
|
||||||
|
trigger={['click']}
|
||||||
|
dropdownRender={(originNode): React.ReactNode => (
|
||||||
|
<div data-log-detail-ignore="true">{originNode}</div>
|
||||||
|
)}
|
||||||
|
>
|
||||||
|
<SettingOutlined style={{ marginRight: 8 }} className="hover-reveal" />
|
||||||
|
</Dropdown>
|
||||||
|
</span>
|
||||||
)}
|
)}
|
||||||
{title.toString()}{' '}
|
{title.toString()}{' '}
|
||||||
{!parentIsArray && typeof value !== 'object' && (
|
{!parentIsArray && typeof value !== 'object' && (
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ function FieldRenderer({ field }: FieldRendererProps): JSX.Element {
|
|||||||
<span className="field-renderer-container">
|
<span className="field-renderer-container">
|
||||||
{dataType && newField && logType ? (
|
{dataType && newField && logType ? (
|
||||||
<>
|
<>
|
||||||
<Tooltip placement="left" title={newField}>
|
<Tooltip placement="left" title={newField} mouseLeaveDelay={0}>
|
||||||
<Typography.Text ellipsis className="label">
|
<Typography.Text ellipsis className="label">
|
||||||
{newField}{' '}
|
{newField}{' '}
|
||||||
</Typography.Text>
|
</Typography.Text>
|
||||||
|
|||||||
@@ -46,7 +46,7 @@ function Overview({
|
|||||||
handleChangeSelectedView,
|
handleChangeSelectedView,
|
||||||
}: Props): JSX.Element {
|
}: Props): JSX.Element {
|
||||||
const [isWrapWord, setIsWrapWord] = useState<boolean>(true);
|
const [isWrapWord, setIsWrapWord] = useState<boolean>(true);
|
||||||
const [isSearchVisible, setIsSearchVisible] = useState<boolean>(false);
|
const [isSearchVisible, setIsSearchVisible] = useState<boolean>(true);
|
||||||
const [isAttributesExpanded, setIsAttributesExpanded] = useState<boolean>(
|
const [isAttributesExpanded, setIsAttributesExpanded] = useState<boolean>(
|
||||||
true,
|
true,
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -245,7 +245,7 @@ function TableView({
|
|||||||
<Typography.Text>{renderedField}</Typography.Text>
|
<Typography.Text>{renderedField}</Typography.Text>
|
||||||
|
|
||||||
{traceId && (
|
{traceId && (
|
||||||
<Tooltip title="Inspect in Trace">
|
<Tooltip title="Inspect in Trace" mouseLeaveDelay={0}>
|
||||||
<Button
|
<Button
|
||||||
className="periscope-btn"
|
className="periscope-btn"
|
||||||
onClick={(
|
onClick={(
|
||||||
|
|||||||
@@ -0,0 +1,34 @@
|
|||||||
|
import { Color } from '@signozhq/design-tokens';
|
||||||
|
|
||||||
|
import { getColorsForSeverityLabels, isRedLike } from '../utils';
|
||||||
|
|
||||||
|
describe('getColorsForSeverityLabels', () => {
|
||||||
|
it('should return slate for blank labels', () => {
|
||||||
|
expect(getColorsForSeverityLabels('', 0)).toBe(Color.BG_SLATE_300);
|
||||||
|
expect(getColorsForSeverityLabels(' ', 0)).toBe(Color.BG_SLATE_300);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return correct colors for known severity variants', () => {
|
||||||
|
expect(getColorsForSeverityLabels('INFO', 0)).toBe(Color.BG_ROBIN_600);
|
||||||
|
expect(getColorsForSeverityLabels('ERROR', 0)).toBe(Color.BG_CHERRY_600);
|
||||||
|
expect(getColorsForSeverityLabels('WARN', 0)).toBe(Color.BG_AMBER_600);
|
||||||
|
expect(getColorsForSeverityLabels('DEBUG', 0)).toBe(Color.BG_AQUA_600);
|
||||||
|
expect(getColorsForSeverityLabels('TRACE', 0)).toBe(Color.BG_FOREST_600);
|
||||||
|
expect(getColorsForSeverityLabels('FATAL', 0)).toBe(Color.BG_SAKURA_600);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return non-red colors for unrecognized labels at any index', () => {
|
||||||
|
for (let i = 0; i < 30; i++) {
|
||||||
|
const color = getColorsForSeverityLabels('4', i);
|
||||||
|
expect(isRedLike(color)).toBe(false);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should return non-red colors for numeric severity text', () => {
|
||||||
|
const numericLabels = ['1', '2', '4', '9', '13', '17', '21'];
|
||||||
|
numericLabels.forEach((label) => {
|
||||||
|
const color = getColorsForSeverityLabels(label, 0);
|
||||||
|
expect(isRedLike(color)).toBe(false);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
@@ -1,7 +1,16 @@
|
|||||||
import { Color } from '@signozhq/design-tokens';
|
import { Color } from '@signozhq/design-tokens';
|
||||||
import { themeColors } from 'constants/theme';
|
|
||||||
import { colors } from 'lib/getRandomColor';
|
import { colors } from 'lib/getRandomColor';
|
||||||
|
|
||||||
|
// Function to determine if a color is "red-like" based on its RGB values
|
||||||
|
export function isRedLike(hex: string): boolean {
|
||||||
|
const r = parseInt(hex.slice(1, 3), 16);
|
||||||
|
const g = parseInt(hex.slice(3, 5), 16);
|
||||||
|
const b = parseInt(hex.slice(5, 7), 16);
|
||||||
|
return r > 180 && r > g * 1.4 && r > b * 1.4;
|
||||||
|
}
|
||||||
|
|
||||||
|
const SAFE_FALLBACK_COLORS = colors.filter((c) => !isRedLike(c));
|
||||||
|
|
||||||
const SEVERITY_VARIANT_COLORS: Record<string, string> = {
|
const SEVERITY_VARIANT_COLORS: Record<string, string> = {
|
||||||
TRACE: Color.BG_FOREST_600,
|
TRACE: Color.BG_FOREST_600,
|
||||||
Trace: Color.BG_FOREST_500,
|
Trace: Color.BG_FOREST_500,
|
||||||
@@ -67,8 +76,13 @@ export function getColorsForSeverityLabels(
|
|||||||
label: string,
|
label: string,
|
||||||
index: number,
|
index: number,
|
||||||
): string {
|
): string {
|
||||||
// Check if we have a direct mapping for this severity variant
|
const trimmed = label.trim();
|
||||||
const variantColor = SEVERITY_VARIANT_COLORS[label.trim()];
|
|
||||||
|
if (!trimmed) {
|
||||||
|
return Color.BG_SLATE_300;
|
||||||
|
}
|
||||||
|
|
||||||
|
const variantColor = SEVERITY_VARIANT_COLORS[trimmed];
|
||||||
if (variantColor) {
|
if (variantColor) {
|
||||||
return variantColor;
|
return variantColor;
|
||||||
}
|
}
|
||||||
@@ -103,5 +117,8 @@ export function getColorsForSeverityLabels(
|
|||||||
return Color.BG_SAKURA_500;
|
return Color.BG_SAKURA_500;
|
||||||
}
|
}
|
||||||
|
|
||||||
return colors[index % colors.length] || themeColors.red;
|
return (
|
||||||
|
SAFE_FALLBACK_COLORS[index % SAFE_FALLBACK_COLORS.length] ||
|
||||||
|
Color.BG_SLATE_400
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -111,23 +111,19 @@ const InfinityTable = forwardRef<TableVirtuosoHandle, InfinityTableProps>(
|
|||||||
);
|
);
|
||||||
|
|
||||||
const itemContent = useCallback(
|
const itemContent = useCallback(
|
||||||
(index: number, log: Record<string, unknown>): JSX.Element => {
|
(index: number, log: Record<string, unknown>): JSX.Element => (
|
||||||
return (
|
<TableRow
|
||||||
<div key={log.id as string}>
|
tableColumns={tableColumns}
|
||||||
<TableRow
|
index={index}
|
||||||
tableColumns={tableColumns}
|
log={log}
|
||||||
index={index}
|
logs={tableViewProps.logs}
|
||||||
log={log}
|
hasActions
|
||||||
logs={tableViewProps.logs}
|
fontSize={tableViewProps.fontSize}
|
||||||
hasActions
|
onShowLogDetails={onSetActiveLog}
|
||||||
fontSize={tableViewProps.fontSize}
|
isActiveLog={activeLog?.id === log.id}
|
||||||
onShowLogDetails={onSetActiveLog}
|
onClearActiveLog={onCloseActiveLog}
|
||||||
isActiveLog={activeLog?.id === log.id}
|
/>
|
||||||
onClearActiveLog={onCloseActiveLog}
|
),
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
);
|
|
||||||
},
|
|
||||||
[
|
[
|
||||||
tableColumns,
|
tableColumns,
|
||||||
onSetActiveLog,
|
onSetActiveLog,
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
import { useTranslation } from 'react-i18next';
|
import { useTranslation } from 'react-i18next';
|
||||||
import { Form } from 'antd';
|
import { Form } from 'antd';
|
||||||
import { initialQueryBuilderFormValuesMap } from 'constants/queryBuilder';
|
import { initialQueryBuilderFormValuesMap } from 'constants/queryBuilder';
|
||||||
import QueryBuilderSearch from 'container/QueryBuilder/filters/QueryBuilderSearch';
|
import QueryBuilderSearchV2 from 'container/QueryBuilder/filters/QueryBuilderSearchV2/QueryBuilderSearchV2';
|
||||||
import isEqual from 'lodash-es/isEqual';
|
import isEqual from 'lodash-es/isEqual';
|
||||||
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
|
import { TagFilter } from 'types/api/queryBuilder/queryBuilderData';
|
||||||
|
|
||||||
@@ -30,7 +30,7 @@ function TagFilterInput({
|
|||||||
};
|
};
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<QueryBuilderSearch
|
<QueryBuilderSearchV2
|
||||||
query={query}
|
query={query}
|
||||||
onChange={onQueryChange}
|
onChange={onQueryChange}
|
||||||
placeholder={placeholder}
|
placeholder={placeholder}
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ jest.mock('providers/preferences/sync/usePreferenceSync', () => ({
|
|||||||
}));
|
}));
|
||||||
|
|
||||||
const BASE_URL = ENVIRONMENT.baseURL;
|
const BASE_URL = ENVIRONMENT.baseURL;
|
||||||
const attributeKeysURL = `${BASE_URL}/api/v3/autocomplete/attribute_keys`;
|
const attributeKeysURL = `${BASE_URL}/api/v3/filter_suggestions`;
|
||||||
|
|
||||||
describe('PipelinePage container test', () => {
|
describe('PipelinePage container test', () => {
|
||||||
beforeAll(() => {
|
beforeAll(() => {
|
||||||
@@ -333,26 +333,34 @@ describe('PipelinePage container test', () => {
|
|||||||
ctx.json({
|
ctx.json({
|
||||||
status: 'success',
|
status: 'success',
|
||||||
data: {
|
data: {
|
||||||
attributeKeys: [
|
attributes: [
|
||||||
{
|
{
|
||||||
key: 'otelServiceName',
|
key: 'otelServiceName',
|
||||||
dataType: DataTypes.String,
|
dataType: DataTypes.String,
|
||||||
type: 'tag',
|
type: 'tag',
|
||||||
|
isColumn: false,
|
||||||
|
isJSON: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
key: 'service.name',
|
||||||
|
dataType: DataTypes.String,
|
||||||
|
type: 'resource',
|
||||||
|
isColumn: false,
|
||||||
|
isJSON: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
key: 'service.instance.id',
|
key: 'service.instance.id',
|
||||||
dataType: DataTypes.String,
|
dataType: DataTypes.String,
|
||||||
type: 'resource',
|
type: 'resource',
|
||||||
},
|
isColumn: false,
|
||||||
{
|
isJSON: false,
|
||||||
key: 'service.name',
|
|
||||||
dataType: DataTypes.String,
|
|
||||||
type: 'resource',
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
key: 'service.name',
|
key: 'service.name',
|
||||||
dataType: DataTypes.String,
|
dataType: DataTypes.String,
|
||||||
type: 'tag',
|
type: 'tag',
|
||||||
|
isColumn: false,
|
||||||
|
isJSON: false,
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -973,6 +973,7 @@ function QueryBuilderSearchV2(
|
|||||||
return (
|
return (
|
||||||
<div className="query-builder-search-v2">
|
<div className="query-builder-search-v2">
|
||||||
<Select
|
<Select
|
||||||
|
data-testid={'qb-search-select'}
|
||||||
ref={selectRef}
|
ref={selectRef}
|
||||||
// eslint-disable-next-line react/jsx-props-no-spreading
|
// eslint-disable-next-line react/jsx-props-no-spreading
|
||||||
{...(hasPopupContainer ? { getPopupContainer: popupContainer } : {})}
|
{...(hasPopupContainer ? { getPopupContainer: popupContainer } : {})}
|
||||||
|
|||||||
@@ -0,0 +1,94 @@
|
|||||||
|
import { act, renderHook } from '@testing-library/react';
|
||||||
|
import { useActiveLog } from 'hooks/logs/useActiveLog';
|
||||||
|
import { useIsTextSelected } from 'hooks/useIsTextSelected';
|
||||||
|
import { ILog } from 'types/api/logs/log';
|
||||||
|
|
||||||
|
import useLogDetailHandlers from '../useLogDetailHandlers';
|
||||||
|
|
||||||
|
jest.mock('hooks/logs/useActiveLog');
|
||||||
|
jest.mock('hooks/useIsTextSelected');
|
||||||
|
|
||||||
|
const mockOnSetActiveLog = jest.fn();
|
||||||
|
const mockOnClearActiveLog = jest.fn();
|
||||||
|
const mockOnAddToQuery = jest.fn();
|
||||||
|
const mockOnGroupByAttribute = jest.fn();
|
||||||
|
const mockIsTextSelected = jest.fn();
|
||||||
|
|
||||||
|
const mockLog: ILog = {
|
||||||
|
id: 'log-1',
|
||||||
|
timestamp: '2024-01-01T00:00:00Z',
|
||||||
|
date: '2024-01-01',
|
||||||
|
body: 'test log body',
|
||||||
|
severityText: 'INFO',
|
||||||
|
severityNumber: 9,
|
||||||
|
traceFlags: 0,
|
||||||
|
traceId: '',
|
||||||
|
spanID: '',
|
||||||
|
attributesString: {},
|
||||||
|
attributesInt: {},
|
||||||
|
attributesFloat: {},
|
||||||
|
resources_string: {},
|
||||||
|
scope_string: {},
|
||||||
|
attributes_string: {},
|
||||||
|
severity_text: '',
|
||||||
|
severity_number: 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
jest.clearAllMocks();
|
||||||
|
|
||||||
|
jest.mocked(useIsTextSelected).mockReturnValue(mockIsTextSelected);
|
||||||
|
|
||||||
|
jest.mocked(useActiveLog).mockReturnValue({
|
||||||
|
activeLog: null,
|
||||||
|
onSetActiveLog: mockOnSetActiveLog,
|
||||||
|
onClearActiveLog: mockOnClearActiveLog,
|
||||||
|
onAddToQuery: mockOnAddToQuery,
|
||||||
|
onGroupByAttribute: mockOnGroupByAttribute,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should not open log detail when text is selected', () => {
|
||||||
|
mockIsTextSelected.mockReturnValue(true);
|
||||||
|
|
||||||
|
const { result } = renderHook(() => useLogDetailHandlers());
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.handleSetActiveLog(mockLog);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockOnSetActiveLog).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should open log detail when no text is selected', () => {
|
||||||
|
mockIsTextSelected.mockReturnValue(false);
|
||||||
|
|
||||||
|
const { result } = renderHook(() => useLogDetailHandlers());
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.handleSetActiveLog(mockLog);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockOnSetActiveLog).toHaveBeenCalledWith(mockLog);
|
||||||
|
});
|
||||||
|
|
||||||
|
it('should toggle off when clicking the same active log', () => {
|
||||||
|
mockIsTextSelected.mockReturnValue(false);
|
||||||
|
|
||||||
|
jest.mocked(useActiveLog).mockReturnValue({
|
||||||
|
activeLog: mockLog,
|
||||||
|
onSetActiveLog: mockOnSetActiveLog,
|
||||||
|
onClearActiveLog: mockOnClearActiveLog,
|
||||||
|
onAddToQuery: mockOnAddToQuery,
|
||||||
|
onGroupByAttribute: mockOnGroupByAttribute,
|
||||||
|
});
|
||||||
|
|
||||||
|
const { result } = renderHook(() => useLogDetailHandlers());
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.handleSetActiveLog(mockLog);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockOnClearActiveLog).toHaveBeenCalled();
|
||||||
|
expect(mockOnSetActiveLog).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
@@ -1,15 +1,17 @@
|
|||||||
import { useCallback, useMemo, useState } from 'react';
|
import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
|
||||||
import { useQueryClient } from 'react-query';
|
import { useQueryClient } from 'react-query';
|
||||||
import { useDispatch, useSelector } from 'react-redux';
|
import { useDispatch, useSelector } from 'react-redux';
|
||||||
import { useHistory, useLocation } from 'react-router-dom';
|
import { useHistory, useLocation } from 'react-router-dom';
|
||||||
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
|
import { getAggregateKeys } from 'api/queryBuilder/getAttributeKeys';
|
||||||
import { SOMETHING_WENT_WRONG } from 'constants/api';
|
import { SOMETHING_WENT_WRONG } from 'constants/api';
|
||||||
|
import { QueryParams } from 'constants/query';
|
||||||
import { OPERATORS, QueryBuilderKeys } from 'constants/queryBuilder';
|
import { OPERATORS, QueryBuilderKeys } from 'constants/queryBuilder';
|
||||||
import ROUTES from 'constants/routes';
|
import ROUTES from 'constants/routes';
|
||||||
import { MetricsType } from 'container/MetricsApplication/constant';
|
import { MetricsType } from 'container/MetricsApplication/constant';
|
||||||
import { getOperatorValue } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
|
import { getOperatorValue } from 'container/QueryBuilder/filters/QueryBuilderSearch/utils';
|
||||||
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
|
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
|
||||||
import { useNotifications } from 'hooks/useNotifications';
|
import { useNotifications } from 'hooks/useNotifications';
|
||||||
|
import useUrlQuery from 'hooks/useUrlQuery';
|
||||||
import { getGeneratedFilterQueryString } from 'lib/getGeneratedFilterQueryString';
|
import { getGeneratedFilterQueryString } from 'lib/getGeneratedFilterQueryString';
|
||||||
import { chooseAutocompleteFromCustomValue } from 'lib/newQueryBuilder/chooseAutocompleteFromCustomValue';
|
import { chooseAutocompleteFromCustomValue } from 'lib/newQueryBuilder/chooseAutocompleteFromCustomValue';
|
||||||
import { AppState } from 'store/reducers';
|
import { AppState } from 'store/reducers';
|
||||||
@@ -54,6 +56,20 @@ export const useActiveLog = (): UseActiveLog => {
|
|||||||
|
|
||||||
const [activeLog, setActiveLog] = useState<ILog | null>(null);
|
const [activeLog, setActiveLog] = useState<ILog | null>(null);
|
||||||
|
|
||||||
|
// Close drawer/clear active log when query in URL changes
|
||||||
|
const urlQuery = useUrlQuery();
|
||||||
|
const compositeQuery = urlQuery.get(QueryParams.compositeQuery) ?? '';
|
||||||
|
const prevQueryRef = useRef<string | null>(null);
|
||||||
|
useEffect(() => {
|
||||||
|
if (
|
||||||
|
prevQueryRef.current !== null &&
|
||||||
|
prevQueryRef.current !== compositeQuery
|
||||||
|
) {
|
||||||
|
setActiveLog(null);
|
||||||
|
}
|
||||||
|
prevQueryRef.current = compositeQuery;
|
||||||
|
}, [compositeQuery]);
|
||||||
|
|
||||||
const onSetDetailedLogData = useCallback(
|
const onSetDetailedLogData = useCallback(
|
||||||
(logData: ILog) => {
|
(logData: ILog) => {
|
||||||
dispatch({
|
dispatch({
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ import { useCallback, useState } from 'react';
|
|||||||
import { VIEW_TYPES } from 'components/LogDetail/constants';
|
import { VIEW_TYPES } from 'components/LogDetail/constants';
|
||||||
import type { UseActiveLog } from 'hooks/logs/types';
|
import type { UseActiveLog } from 'hooks/logs/types';
|
||||||
import { useActiveLog } from 'hooks/logs/useActiveLog';
|
import { useActiveLog } from 'hooks/logs/useActiveLog';
|
||||||
|
import { useIsTextSelected } from 'hooks/useIsTextSelected';
|
||||||
import { ILog } from 'types/api/logs/log';
|
import { ILog } from 'types/api/logs/log';
|
||||||
|
|
||||||
type SelectedTab = typeof VIEW_TYPES[keyof typeof VIEW_TYPES] | undefined;
|
type SelectedTab = typeof VIEW_TYPES[keyof typeof VIEW_TYPES] | undefined;
|
||||||
@@ -28,9 +29,13 @@ function useLogDetailHandlers({
|
|||||||
onAddToQuery,
|
onAddToQuery,
|
||||||
} = useActiveLog();
|
} = useActiveLog();
|
||||||
const [selectedTab, setSelectedTab] = useState<SelectedTab>(defaultTab);
|
const [selectedTab, setSelectedTab] = useState<SelectedTab>(defaultTab);
|
||||||
|
const isTextSelected = useIsTextSelected();
|
||||||
|
|
||||||
const handleSetActiveLog = useCallback(
|
const handleSetActiveLog = useCallback(
|
||||||
(log: ILog, nextTab: SelectedTab = defaultTab): void => {
|
(log: ILog, nextTab: SelectedTab = defaultTab): void => {
|
||||||
|
if (isTextSelected()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
if (activeLog?.id === log.id) {
|
if (activeLog?.id === log.id) {
|
||||||
onClearActiveLog();
|
onClearActiveLog();
|
||||||
setSelectedTab(undefined);
|
setSelectedTab(undefined);
|
||||||
@@ -39,7 +44,7 @@ function useLogDetailHandlers({
|
|||||||
onSetActiveLog(log);
|
onSetActiveLog(log);
|
||||||
setSelectedTab(nextTab ?? defaultTab);
|
setSelectedTab(nextTab ?? defaultTab);
|
||||||
},
|
},
|
||||||
[activeLog?.id, defaultTab, onClearActiveLog, onSetActiveLog],
|
[activeLog?.id, defaultTab, onClearActiveLog, onSetActiveLog, isTextSelected],
|
||||||
);
|
);
|
||||||
|
|
||||||
const handleCloseLogDetail = useCallback((): void => {
|
const handleCloseLogDetail = useCallback((): void => {
|
||||||
|
|||||||
10
frontend/src/hooks/useIsTextSelected.ts
Normal file
10
frontend/src/hooks/useIsTextSelected.ts
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
import { useCallback } from 'react';
|
||||||
|
|
||||||
|
export function useIsTextSelected(): () => boolean {
|
||||||
|
return useCallback((): boolean => {
|
||||||
|
const selection = window.getSelection();
|
||||||
|
return (
|
||||||
|
!!selection && !selection.isCollapsed && selection.toString().length > 0
|
||||||
|
);
|
||||||
|
}, []);
|
||||||
|
}
|
||||||
44
pkg/http/middleware/recovery.go
Normal file
44
pkg/http/middleware/recovery.go
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"runtime/debug"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/http/render"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Recovery is a middleware that recovers from panics, logs the panic,
|
||||||
|
// and returns a 500 Internal Server Error.
|
||||||
|
type Recovery struct {
|
||||||
|
logger *slog.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRecovery creates a new Recovery middleware.
|
||||||
|
func NewRecovery(logger *slog.Logger) Wrapper {
|
||||||
|
return &Recovery{
|
||||||
|
logger: logger.With("pkg", "http-middleware-recovery"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wrap is the middleware handler.
|
||||||
|
func (m *Recovery) Wrap(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
defer func() {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
m.logger.ErrorContext(
|
||||||
|
r.Context(),
|
||||||
|
"panic recovered",
|
||||||
|
"err", err, "stack", string(debug.Stack()),
|
||||||
|
)
|
||||||
|
|
||||||
|
render.Error(w, errors.NewInternalf(
|
||||||
|
errors.CodeInternal, "internal server error",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
27
pkg/modules/cloudintegrations/cloudintegrations.go
Normal file
27
pkg/modules/cloudintegrations/cloudintegrations.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package cloudintegrations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// start with moving the agent functions here to get review
|
||||||
|
|
||||||
|
// type Module interface {
|
||||||
|
// AgentCheckIn(http.ResponseWriter, *http.Request)
|
||||||
|
// }
|
||||||
|
|
||||||
|
type Handler interface {
|
||||||
|
AgentCheckIn(http.ResponseWriter, *http.Request)
|
||||||
|
ListServices(http.ResponseWriter, *http.Request)
|
||||||
|
}
|
||||||
|
|
||||||
|
type Module interface {
|
||||||
|
AgentCheckIn(ctx context.Context, req *PostableAgentCheckInPayload) (any, error)
|
||||||
|
ListServices(ctx context.Context, orgID string, cloudProvider string, cloudAccountId *string) (any, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// store interface will be in the types package
|
||||||
|
type CloudProvider interface {
|
||||||
|
ListServices(ctx context.Context, orgID string, cloudAccountId *string) (any, error)
|
||||||
|
}
|
||||||
@@ -0,0 +1,99 @@
|
|||||||
|
package implcloudintergations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/http/render"
|
||||||
|
"github.com/SigNoz/signoz/pkg/modules/cloudintegrations"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
)
|
||||||
|
|
||||||
|
type handler struct {
|
||||||
|
module cloudintegrations.Module
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHandler(module cloudintegrations.Module) *handler {
|
||||||
|
return &handler{
|
||||||
|
module: module,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) CloudIntegrationsAgentCheckIn(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
claims, err := authtypes.ClaimsFromContext(ctx)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
req := new(integrationtypes.PostableAgentCheckInPayload)
|
||||||
|
if err = json.NewDecoder(r.Body).Decode(req); err != nil {
|
||||||
|
render.Error(rw, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid request body"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
req.OrgID = claims.OrgID
|
||||||
|
|
||||||
|
// we need to get the config
|
||||||
|
|
||||||
|
resp, err := h.cloudIntegrationsRegistry[cloudProvider].AgentCheckIn(r.Context(), req)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(rw, http.StatusOK, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) ListServices(rw http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var cloudAccountId *string
|
||||||
|
|
||||||
|
cloudAccountIdQP := r.URL.Query().Get("cloud_account_id")
|
||||||
|
if len(cloudAccountIdQP) > 0 {
|
||||||
|
cloudAccountId = &cloudAccountIdQP
|
||||||
|
}
|
||||||
|
|
||||||
|
// give me the provider and then use it
|
||||||
|
|
||||||
|
resp, err := h.module.ListServices(ctx, claims.OrgID, cloudProvider, cloudAccountId)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(rw, http.StatusOK, resp)
|
||||||
|
|
||||||
|
}
|
||||||
175
pkg/modules/cloudintegrations/implcloudintergations/store.go
Normal file
175
pkg/modules/cloudintegrations/implcloudintergations/store.go
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
package implcloudintergations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
)
|
||||||
|
|
||||||
|
type store struct {
|
||||||
|
sqlstore sqlstore.SQLStore
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewStore(sqlstore sqlstore.SQLStore) integrationtypes.Store {
|
||||||
|
return &store{sqlstore: sqlstore}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *store) ListConnected(
|
||||||
|
ctx context.Context, orgId string, cloudProvider string,
|
||||||
|
) ([]integrationtypes.CloudIntegration, error) {
|
||||||
|
accounts := []integrationtypes.CloudIntegration{}
|
||||||
|
|
||||||
|
err := s.sqlstore.BunDB().NewSelect().
|
||||||
|
Model(&accounts).
|
||||||
|
Where("org_id = ?", orgId).
|
||||||
|
Where("provider = ?", cloudProvider).
|
||||||
|
Where("removed_at is NULL").
|
||||||
|
Where("account_id is not NULL").
|
||||||
|
Where("last_agent_report is not NULL").
|
||||||
|
Order("created_at").
|
||||||
|
Scan(ctx)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
slog.ErrorContext(ctx, "error querying connected cloud accounts", "error", err)
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not query connected cloud accounts")
|
||||||
|
}
|
||||||
|
|
||||||
|
return accounts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *store) Get(
|
||||||
|
ctx context.Context, orgId string, provider string, id string,
|
||||||
|
) (*integrationtypes.CloudIntegration, error) {
|
||||||
|
var result integrationtypes.CloudIntegration
|
||||||
|
|
||||||
|
err := s.sqlstore.BunDB().NewSelect().
|
||||||
|
Model(&result).
|
||||||
|
Where("org_id = ?", orgId).
|
||||||
|
Where("provider = ?", provider).
|
||||||
|
Where("id = ?", id).
|
||||||
|
Scan(ctx)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
|
return nil, s.sqlstore.WrapNotFoundErrf(
|
||||||
|
err,
|
||||||
|
integrationtypes.ErrCodeCloudIntegrationAccountNotFound,
|
||||||
|
"couldn't find account with Id %s", id,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *store) GetConnectedCloudAccount(
|
||||||
|
ctx context.Context, orgId string, provider string, accountId string,
|
||||||
|
) (*integrationtypes.CloudIntegration, error) {
|
||||||
|
var result integrationtypes.CloudIntegration
|
||||||
|
|
||||||
|
err := s.sqlstore.BunDB().NewSelect().
|
||||||
|
Model(&result).
|
||||||
|
Where("org_id = ?", orgId).
|
||||||
|
Where("provider = ?", provider).
|
||||||
|
Where("account_id = ?", accountId).
|
||||||
|
Where("last_agent_report is not NULL").
|
||||||
|
Where("removed_at is NULL").
|
||||||
|
Scan(ctx)
|
||||||
|
|
||||||
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
|
return nil, s.sqlstore.WrapNotFoundErrf(err, integrationtypes.ErrCodeCloudIntegrationAccountNotFound, "couldn't find connected cloud account %s", accountId)
|
||||||
|
} else if err != nil {
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *store) Upsert(
|
||||||
|
ctx context.Context,
|
||||||
|
orgId string,
|
||||||
|
provider string,
|
||||||
|
id *string,
|
||||||
|
config []byte,
|
||||||
|
accountId *string,
|
||||||
|
agentReport *integrationtypes.AgentReport,
|
||||||
|
removedAt *time.Time,
|
||||||
|
) (*integrationtypes.CloudIntegration, error) {
|
||||||
|
if id == nil {
|
||||||
|
temp := valuer.GenerateUUID().StringValue()
|
||||||
|
id = &temp
|
||||||
|
}
|
||||||
|
|
||||||
|
onConflictSetStmts := []string{}
|
||||||
|
setColStatement := func(col string) string {
|
||||||
|
return fmt.Sprintf("%s=excluded.%s", col, col)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config != nil {
|
||||||
|
onConflictSetStmts = append(onConflictSetStmts, setColStatement("config"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if accountId != nil {
|
||||||
|
onConflictSetStmts = append(onConflictSetStmts, setColStatement("account_id"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if agentReport != nil {
|
||||||
|
onConflictSetStmts = append(onConflictSetStmts, setColStatement("last_agent_report"))
|
||||||
|
}
|
||||||
|
|
||||||
|
if removedAt != nil {
|
||||||
|
onConflictSetStmts = append(onConflictSetStmts, setColStatement("removed_at"))
|
||||||
|
}
|
||||||
|
|
||||||
|
onConflictSetStmts = append(onConflictSetStmts, setColStatement("updated_at"))
|
||||||
|
|
||||||
|
onConflictClause := ""
|
||||||
|
if len(onConflictSetStmts) > 0 {
|
||||||
|
onConflictClause = fmt.Sprintf(
|
||||||
|
"conflict(id, provider, org_id) do update SET\n%s",
|
||||||
|
strings.Join(onConflictSetStmts, ",\n"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
integration := integrationtypes.CloudIntegration{
|
||||||
|
OrgID: orgId,
|
||||||
|
Provider: provider,
|
||||||
|
Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
|
||||||
|
TimeAuditable: types.TimeAuditable{
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
},
|
||||||
|
Config: string(config),
|
||||||
|
AccountID: accountId,
|
||||||
|
LastAgentReport: agentReport,
|
||||||
|
RemovedAt: removedAt,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := s.sqlstore.BunDB().NewInsert().
|
||||||
|
Model(&integration).
|
||||||
|
On(onConflictClause).
|
||||||
|
Exec(ctx)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud integration account")
|
||||||
|
}
|
||||||
|
|
||||||
|
upsertedAccount, err := s.Get(ctx, orgId, provider, *id)
|
||||||
|
if err != nil {
|
||||||
|
slog.ErrorContext(ctx, "error upserting cloud integration account", "error", err)
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't get upserted cloud integration account")
|
||||||
|
}
|
||||||
|
|
||||||
|
return upsertedAccount, nil
|
||||||
|
}
|
||||||
@@ -87,7 +87,7 @@ func (m *module) ListPromotedAndIndexedPaths(ctx context.Context) ([]promotetype
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *module) listPromotedPaths(ctx context.Context) ([]string, error) {
|
func (m *module) listPromotedPaths(ctx context.Context) ([]string, error) {
|
||||||
paths, err := m.metadataStore.ListPromotedPaths(ctx)
|
paths, err := m.metadataStore.GetPromotedPaths(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -142,7 +142,7 @@ func (m *module) PromoteAndIndexPaths(
|
|||||||
pathsStr = append(pathsStr, path.Path)
|
pathsStr = append(pathsStr, path.Path)
|
||||||
}
|
}
|
||||||
|
|
||||||
existingPromotedPaths, err := m.metadataStore.ListPromotedPaths(ctx, pathsStr...)
|
existingPromotedPaths, err := m.metadataStore.GetPromotedPaths(ctx, pathsStr...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
root "github.com/SigNoz/signoz/pkg/modules/user"
|
root "github.com/SigNoz/signoz/pkg/modules/user"
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
)
|
)
|
||||||
@@ -462,7 +463,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
|
if slices.Contains(integrationtypes.CloudIntegrationUserEmails, createdByUser.Email) {
|
||||||
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
|
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -507,7 +508,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
|
if slices.Contains(integrationtypes.CloudIntegrationUserEmails, createdByUser.Email) {
|
||||||
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
|
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
"github.com/SigNoz/signoz/pkg/types/authtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/types/emailtypes"
|
"github.com/SigNoz/signoz/pkg/types/emailtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/types/roletypes"
|
"github.com/SigNoz/signoz/pkg/types/roletypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
"github.com/dustin/go-humanize"
|
"github.com/dustin/go-humanize"
|
||||||
@@ -173,7 +174,7 @@ func (m *Module) DeleteInvite(ctx context.Context, orgID string, id valuer.UUID)
|
|||||||
func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
|
func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
|
||||||
createUserOpts := root.NewCreateUserOptions(opts...)
|
createUserOpts := root.NewCreateUserOptions(opts...)
|
||||||
|
|
||||||
// since assign is idempotant multiple calls to assign won't cause issues in case of retries.
|
// since assign is idempotent multiple calls to assign won't cause issues in case of retries.
|
||||||
err := module.authz.Grant(ctx, input.OrgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role), authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
|
err := module.authz.Grant(ctx, input.OrgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role), authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -279,7 +280,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
|
|||||||
return errors.WithAdditionalf(err, "cannot delete root user")
|
return errors.WithAdditionalf(err, "cannot delete root user")
|
||||||
}
|
}
|
||||||
|
|
||||||
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
|
if slices.Contains(integrationtypes.CloudIntegrationUserEmails, user.Email) {
|
||||||
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
|
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -293,7 +294,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
|
|||||||
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot delete the last admin")
|
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot delete the last admin")
|
||||||
}
|
}
|
||||||
|
|
||||||
// since revoke is idempotant multiple calls to revoke won't cause issues in case of retries
|
// since revoke is idempotent multiple calls to revoke won't cause issues in case of retries
|
||||||
err = module.authz.Revoke(ctx, orgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role), authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
|
err = module.authz.Revoke(ctx, orgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role), authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -0,0 +1,571 @@
|
|||||||
|
package baseprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/querier"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/utils"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||||
|
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
CodeDashboardNotFound = errors.MustNewCode("dashboard_not_found")
|
||||||
|
)
|
||||||
|
|
||||||
|
// hasValidTimeSeriesData checks if a query response contains valid time series data
|
||||||
|
// with at least one aggregation, series, and value
|
||||||
|
func hasValidTimeSeriesData(queryResponse *qbtypes.TimeSeriesData) bool {
|
||||||
|
return queryResponse != nil &&
|
||||||
|
len(queryResponse.Aggregations) > 0 &&
|
||||||
|
len(queryResponse.Aggregations[0].Series) > 0 &&
|
||||||
|
len(queryResponse.Aggregations[0].Series[0].Values) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
type BaseCloudProvider[def integrationtypes.Definition, conf integrationtypes.ServiceConfigTyped[def]] struct {
|
||||||
|
Logger *slog.Logger
|
||||||
|
Querier querier.Querier
|
||||||
|
AccountsRepo store.CloudProviderAccountsRepository
|
||||||
|
ServiceConfigRepo store.ServiceConfigDatabase
|
||||||
|
ServiceDefinitions *services.ServicesProvider[def]
|
||||||
|
ProviderType integrationtypes.CloudProviderType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BaseCloudProvider[def, conf]) GetName() integrationtypes.CloudProviderType {
|
||||||
|
return b.ProviderType
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentCheckIn is a helper function that handles common agent check-in logic.
// The getAgentConfigFunc should return the provider-specific agent configuration.
//
// Flow: validate that the account exists and that neither the cloud account id
// nor the integration id conflicts with an existing record, record the agent
// report via Upsert, then build the provider-specific config for the agent.
func AgentCheckIn[def integrationtypes.Definition, conf integrationtypes.ServiceConfigTyped[def], AgentConfigT any](
	b *BaseCloudProvider[def, conf],
	ctx context.Context,
	req *integrationtypes.PostableAgentCheckInPayload,
	getAgentConfigFunc func(context.Context, *integrationtypes.CloudIntegration) (*AgentConfigT, error),
) (*integrationtypes.GettableAgentCheckInRes[AgentConfigT], error) {
	// agent can't check in unless the account is already created
	existingAccount, err := b.AccountsRepo.Get(ctx, req.OrgID, b.GetName().String(), req.ID)
	if err != nil {
		return nil, err
	}

	// Reject a check-in that tries to attach a different cloud account id to an
	// integration record that already has one.
	if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput,
			"can't check in with new %s account id %s for account %s with existing %s id %s",
			b.GetName().String(), req.AccountID, existingAccount.ID.StringValue(), b.GetName().String(),
			*existingAccount.AccountID,
		)
	}

	// Reject a check-in for a cloud account that is already connected under a
	// different integration id. NotFound here is expected and not an error.
	existingAccount, err = b.AccountsRepo.GetConnectedCloudAccount(ctx, req.OrgID, b.GetName().String(), req.AccountID)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return nil, err
	}
	if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput,
			"can't check in to %s account %s with id %s. already connected with id %s",
			b.GetName().String(), req.AccountID, req.ID, existingAccount.ID.StringValue(),
		)
	}

	// Record this check-in (timestamp + arbitrary agent-supplied data).
	agentReport := integrationtypes.AgentReport{
		TimestampMillis: time.Now().UnixMilli(),
		Data:            req.Data,
	}

	account, err := b.AccountsRepo.Upsert(
		ctx, req.OrgID, b.GetName().String(), &req.ID, nil, &req.AccountID, &agentReport, nil,
	)
	if err != nil {
		return nil, err
	}

	// Build the provider-specific configuration the agent should apply.
	agentConfig, err := getAgentConfigFunc(ctx, account)
	if err != nil {
		return nil, err
	}

	return &integrationtypes.GettableAgentCheckInRes[AgentConfigT]{
		AccountId:         account.ID.StringValue(),
		CloudAccountId:    *account.AccountID,
		RemovedAt:         account.RemovedAt,
		IntegrationConfig: *agentConfig,
	}, nil
}
|
||||||
|
|
||||||
|
// GetAccountStatus returns the id, cloud account id and derived status of the
// account record identified by accountID for this provider.
func (b *BaseCloudProvider[def, conf]) GetAccountStatus(ctx context.Context, orgID, accountID string) (*integrationtypes.GettableAccountStatus, error) {
	accountRecord, err := b.AccountsRepo.Get(ctx, orgID, b.ProviderType.String(), accountID)
	if err != nil {
		return nil, err
	}

	return &integrationtypes.GettableAccountStatus{
		// NOTE(review): other methods in this file use ID.StringValue();
		// this one uses ID.String() — confirm both render the same value.
		Id:             accountRecord.ID.String(),
		CloudAccountId: accountRecord.AccountID,
		Status:         accountRecord.Status(),
	}, nil
}
|
||||||
|
|
||||||
|
// ListConnectedAccounts returns all connected accounts for this provider in
// the given org, converted to the API representation.
func (b *BaseCloudProvider[def, conf]) ListConnectedAccounts(ctx context.Context, orgID string) (*integrationtypes.GettableConnectedAccountsList, error) {
	accountRecords, err := b.AccountsRepo.ListConnected(ctx, orgID, b.ProviderType.String())
	if err != nil {
		return nil, err
	}

	connectedAccounts := make([]*integrationtypes.Account, 0, len(accountRecords))
	for _, r := range accountRecords {
		connectedAccounts = append(connectedAccounts, r.Account(b.ProviderType))
	}

	return &integrationtypes.GettableConnectedAccountsList{
		Accounts: connectedAccounts,
	}, nil
}
|
||||||
|
|
||||||
|
// DisconnectAccount marks the account as removed by upserting it with the
// current time as its removed-at timestamp, and returns the updated record.
func (b *BaseCloudProvider[def, conf]) DisconnectAccount(ctx context.Context, orgID, accountID string) (*integrationtypes.CloudIntegration, error) {
	// Ensure the account exists before attempting to disconnect it.
	account, err := b.AccountsRepo.Get(ctx, orgID, b.ProviderType.String(), accountID)
	if err != nil {
		return nil, err
	}

	tsNow := time.Now()
	account, err = b.AccountsRepo.Upsert(
		ctx, orgID, b.ProviderType.String(), &accountID, nil, nil, nil, &tsNow,
	)
	if err != nil {
		return nil, err
	}

	return account, nil
}
|
||||||
|
|
||||||
|
// GetDashboard returns the available dashboard with the given id, or a
// CodeDashboardNotFound error when no available dashboard matches.
func (b *BaseCloudProvider[def, conf]) GetDashboard(ctx context.Context, id string, orgID valuer.UUID) (*dashboardtypes.Dashboard, error) {
	// Linear scan over the (small) set of available dashboards.
	allDashboards, err := b.GetAvailableDashboards(ctx, orgID)
	if err != nil {
		return nil, err
	}

	for _, d := range allDashboards {
		if d.ID == id {
			return d, nil
		}
	}

	return nil, errors.NewNotFoundf(CodeDashboardNotFound, "dashboard with id %s not found", id)
}
|
||||||
|
|
||||||
|
// GetServiceConnectionStatus checks metrics and logs ingestion status for a
// service concurrently (one goroutine per signal, panic-protected), and
// returns the combined status. Returns (nil, nil) when the definition has no
// ingestion status checks. Errors from the individual signal checks are
// swallowed; the corresponding field is simply left unset.
func (b *BaseCloudProvider[def, conf]) GetServiceConnectionStatus(
	ctx context.Context,
	cloudAccountID string,
	orgID valuer.UUID,
	definition def,
	isMetricsEnabled bool,
	isLogsEnabled bool,
) (*integrationtypes.ServiceConnectionStatus, error) {
	ingestionStatusCheck := definition.GetIngestionStatusCheck()
	if ingestionStatusCheck == nil {
		return nil, nil
	}

	resp := new(integrationtypes.ServiceConnectionStatus)

	wg := sync.WaitGroup{}

	// The two goroutines write to disjoint fields (Metrics / Logs), so no
	// extra synchronization beyond the WaitGroup is used here.
	if len(ingestionStatusCheck.Metrics) > 0 && isMetricsEnabled {
		wg.Add(1)
		go func() {
			defer utils.RecoverPanic(func(err interface{}, stack []byte) {
				b.Logger.ErrorContext(
					ctx, "panic while getting service metrics connection status",
					"service", definition.GetId(),
					"error", err,
					"stack", string(stack),
				)
			})
			defer wg.Done()
			status, _ := b.getServiceMetricsConnectionStatus(ctx, cloudAccountID, orgID, definition)
			resp.Metrics = status
		}()
	}

	if len(ingestionStatusCheck.Logs) > 0 && isLogsEnabled {
		wg.Add(1)
		go func() {
			defer utils.RecoverPanic(func(err interface{}, stack []byte) {
				b.Logger.ErrorContext(
					ctx, "panic while getting service logs connection status",
					"service", definition.GetId(),
					"error", err,
					"stack", string(stack),
				)
			})
			defer wg.Done()
			status, _ := b.getServiceLogsConnectionStatus(ctx, cloudAccountID, orgID, definition)
			resp.Logs = status
		}()
	}

	wg.Wait()

	return resp, nil
}
|
||||||
|
|
||||||
|
func (b *BaseCloudProvider[def, conf]) getServiceMetricsConnectionStatus(
|
||||||
|
ctx context.Context,
|
||||||
|
cloudAccountID string,
|
||||||
|
orgID valuer.UUID,
|
||||||
|
definition def,
|
||||||
|
) ([]*integrationtypes.SignalConnectionStatus, error) {
|
||||||
|
ingestionStatusCheck := definition.GetIngestionStatusCheck()
|
||||||
|
if ingestionStatusCheck == nil || len(ingestionStatusCheck.Metrics) < 1 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
statusResp := make([]*integrationtypes.SignalConnectionStatus, 0)
|
||||||
|
|
||||||
|
for _, metric := range ingestionStatusCheck.Metrics {
|
||||||
|
statusResp = append(statusResp, &integrationtypes.SignalConnectionStatus{
|
||||||
|
CategoryID: metric.Category,
|
||||||
|
CategoryDisplayName: metric.DisplayName,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
for index, category := range ingestionStatusCheck.Metrics {
|
||||||
|
queries := make([]qbtypes.QueryEnvelope, 0)
|
||||||
|
|
||||||
|
for _, check := range category.Checks {
|
||||||
|
// TODO: make sure all the cloud providers send these two attributes
|
||||||
|
// or create map of provider specific filter expression
|
||||||
|
filterExpression := fmt.Sprintf(`cloud.provider="%s" AND cloud.account.id="%s"`, b.ProviderType.String(), cloudAccountID)
|
||||||
|
f := ""
|
||||||
|
for _, attribute := range check.Attributes {
|
||||||
|
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
|
||||||
|
if attribute.Value != "" {
|
||||||
|
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
queries = append(queries, qbtypes.QueryEnvelope{
|
||||||
|
Type: qbtypes.QueryTypeBuilder,
|
||||||
|
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
|
||||||
|
Name: valuer.GenerateUUID().String(),
|
||||||
|
Signal: telemetrytypes.SignalMetrics,
|
||||||
|
Aggregations: []qbtypes.MetricAggregation{{
|
||||||
|
MetricName: check.Key,
|
||||||
|
TimeAggregation: metrictypes.TimeAggregationAvg,
|
||||||
|
SpaceAggregation: metrictypes.SpaceAggregationAvg,
|
||||||
|
}},
|
||||||
|
Filter: &qbtypes.Filter{
|
||||||
|
Expression: filterExpression,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := b.Querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
|
||||||
|
SchemaVersion: "v5",
|
||||||
|
Start: uint64(time.Now().Add(-time.Hour).UnixMilli()),
|
||||||
|
End: uint64(time.Now().UnixMilli()),
|
||||||
|
RequestType: qbtypes.RequestTypeScalar,
|
||||||
|
CompositeQuery: qbtypes.CompositeQuery{
|
||||||
|
Queries: queries,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
b.Logger.DebugContext(ctx,
|
||||||
|
"error querying for service metrics connection status",
|
||||||
|
"error", err,
|
||||||
|
"service", definition.GetId(),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp != nil && len(resp.Data.Results) < 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
|
||||||
|
if !ok {
|
||||||
|
b.Logger.ErrorContext(ctx, "unexpected query response type for service metrics connection status",
|
||||||
|
"service", definition.GetId(),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hasValidTimeSeriesData(queryResponse) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
statusResp[index] = &integrationtypes.SignalConnectionStatus{
|
||||||
|
CategoryID: category.Category,
|
||||||
|
CategoryDisplayName: category.DisplayName,
|
||||||
|
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
|
||||||
|
LastReceivedFrom: fmt.Sprintf("signoz-%s-integration", b.ProviderType.String()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return statusResp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BaseCloudProvider[def, conf]) getServiceLogsConnectionStatus(
|
||||||
|
ctx context.Context,
|
||||||
|
cloudAccountID string,
|
||||||
|
orgID valuer.UUID,
|
||||||
|
definition def,
|
||||||
|
) ([]*integrationtypes.SignalConnectionStatus, error) {
|
||||||
|
ingestionStatusCheck := definition.GetIngestionStatusCheck()
|
||||||
|
if ingestionStatusCheck == nil || len(ingestionStatusCheck.Logs) < 1 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
statusResp := make([]*integrationtypes.SignalConnectionStatus, 0)
|
||||||
|
|
||||||
|
for _, log := range ingestionStatusCheck.Logs {
|
||||||
|
statusResp = append(statusResp, &integrationtypes.SignalConnectionStatus{
|
||||||
|
CategoryID: log.Category,
|
||||||
|
CategoryDisplayName: log.DisplayName,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
for index, category := range ingestionStatusCheck.Logs {
|
||||||
|
queries := make([]qbtypes.QueryEnvelope, 0)
|
||||||
|
|
||||||
|
for _, check := range category.Checks {
|
||||||
|
// TODO: make sure all the cloud providers provide required attributes for logs
|
||||||
|
// or create map of provider specific filter expression
|
||||||
|
filterExpression := fmt.Sprintf(`cloud.account.id="%s"`, cloudAccountID)
|
||||||
|
f := ""
|
||||||
|
for _, attribute := range check.Attributes {
|
||||||
|
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
|
||||||
|
if attribute.Value != "" {
|
||||||
|
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
queries = append(queries, qbtypes.QueryEnvelope{
|
||||||
|
Type: qbtypes.QueryTypeBuilder,
|
||||||
|
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||||
|
Name: valuer.GenerateUUID().String(),
|
||||||
|
Signal: telemetrytypes.SignalLogs,
|
||||||
|
Aggregations: []qbtypes.LogAggregation{{
|
||||||
|
Expression: "count()",
|
||||||
|
}},
|
||||||
|
Filter: &qbtypes.Filter{
|
||||||
|
Expression: filterExpression,
|
||||||
|
},
|
||||||
|
Limit: 10,
|
||||||
|
Offset: 0,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := b.Querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
|
||||||
|
SchemaVersion: "v1",
|
||||||
|
Start: uint64(time.Now().Add(-time.Hour * 1).UnixMilli()),
|
||||||
|
End: uint64(time.Now().UnixMilli()),
|
||||||
|
RequestType: qbtypes.RequestTypeTimeSeries,
|
||||||
|
CompositeQuery: qbtypes.CompositeQuery{
|
||||||
|
Queries: queries,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
b.Logger.DebugContext(ctx,
|
||||||
|
"error querying for service logs connection status",
|
||||||
|
"error", err,
|
||||||
|
"service", definition.GetId(),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp != nil && len(resp.Data.Results) < 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
|
||||||
|
if !ok {
|
||||||
|
b.Logger.ErrorContext(ctx, "unexpected query response type for service logs connection status",
|
||||||
|
"service", definition.GetId(),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !hasValidTimeSeriesData(queryResponse) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
statusResp[index] = &integrationtypes.SignalConnectionStatus{
|
||||||
|
CategoryID: category.Category,
|
||||||
|
CategoryDisplayName: category.DisplayName,
|
||||||
|
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
|
||||||
|
LastReceivedFrom: fmt.Sprintf("signoz-%s-integration", b.ProviderType.String()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return statusResp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAvailableDashboards returns the dashboards for every service that has
// metrics enabled in at least one connected account for this provider. The
// dashboards are assembled from service-definition assets in sorted service-id
// order so the output is stable across runs.
func (b *BaseCloudProvider[def, conf]) GetAvailableDashboards(
	ctx context.Context,
	orgID valuer.UUID,
) ([]*dashboardtypes.Dashboard, error) {
	accountRecords, err := b.AccountsRepo.ListConnected(ctx, orgID.StringValue(), b.ProviderType.String())
	if err != nil {
		return nil, err
	}

	// Map of service id -> creation time of an account that has metrics
	// enabled for it (used as the dashboards' created-at timestamp).
	servicesWithAvailableMetrics := map[string]*time.Time{}

	for _, ar := range accountRecords {
		if ar.AccountID != nil {
			configsBySvcId, err := b.ServiceConfigRepo.GetAllForAccount(ctx, orgID.StringValue(), ar.ID.StringValue())
			if err != nil {
				return nil, err
			}

			for svcId, config := range configsBySvcId {
				var serviceConfig conf
				err = integrationtypes.UnmarshalJSON(config, &serviceConfig)
				if err != nil {
					return nil, err
				}

				if serviceConfig.IsMetricsEnabled() {
					// NOTE(review): taking &ar.CreatedAt relies on per-iteration
					// loop variables (Go 1.22+) — confirm the module targets that.
					servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
				}
			}
		}
	}

	svcDashboards := make([]*dashboardtypes.Dashboard, 0)

	allServices, err := b.ServiceDefinitions.ListServiceDefinitions(ctx)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to list %s service definitions", b.ProviderType.String())
	}

	// accumulate definitions in a fixed order to ensure same order of dashboards across runs
	svcIds := make([]string, 0, len(allServices))
	for id := range allServices {
		svcIds = append(svcIds, id)
	}
	sort.Strings(svcIds)

	for _, svcId := range svcIds {
		svc := allServices[svcId]
		serviceDashboardsCreatedAt, ok := servicesWithAvailableMetrics[svcId]
		if ok && serviceDashboardsCreatedAt != nil {
			svcDashboards = append(
				svcDashboards,
				integrationtypes.GetDashboardsFromAssets(svc.GetId(), orgID, b.ProviderType, serviceDashboardsCreatedAt, svc.GetAssets())...,
			)
			// Mark as consumed so the same service is not added twice.
			servicesWithAvailableMetrics[svcId] = nil
		}
	}

	return svcDashboards, nil
}
|
||||||
|
|
||||||
|
// GetServiceConfig returns the stored configuration for a service in a
// connected cloud account. A missing config is not an error: the zero value
// of conf is returned. When metrics are enabled, the definition's dashboard
// URLs are populated as a side effect.
func (b *BaseCloudProvider[def, conf]) GetServiceConfig(
	ctx context.Context,
	definition def,
	orgID valuer.UUID,
	serviceId,
	cloudAccountId string,
) (conf, error) {
	var zero conf

	// Config can only be read for a currently-connected account.
	activeAccount, err := b.AccountsRepo.GetConnectedCloudAccount(ctx, orgID.String(), b.ProviderType.String(), cloudAccountId)
	if err != nil {
		return zero, err
	}

	config, err := b.ServiceConfigRepo.Get(ctx, orgID.String(), activeAccount.ID.StringValue(), serviceId)
	if err != nil {
		// No stored config yet: return the zero config without error.
		if errors.Ast(err, errors.TypeNotFound) {
			return zero, nil
		}

		return zero, err
	}

	var serviceConfig conf
	err = integrationtypes.UnmarshalJSON(config, &serviceConfig)
	if err != nil {
		return zero, err
	}

	if config != nil && serviceConfig.IsMetricsEnabled() {
		definition.PopulateDashboardURLs(b.ProviderType, serviceId)
	}

	return serviceConfig, nil
}
|
||||||
|
|
||||||
|
// UpdateServiceConfig validates and persists a new configuration for a
// service in a connected cloud account, returning the stored config. The raw
// request bytes are decoded into UpdatableServiceConfig[conf], validated
// against the service definition, round-tripped through the repo, and echoed
// back.
func (b *BaseCloudProvider[def, conf]) UpdateServiceConfig(ctx context.Context, serviceId string, orgID valuer.UUID, config []byte) (any, error) {
	definition, err := b.ServiceDefinitions.GetServiceDefinition(ctx, serviceId)
	if err != nil {
		return nil, err
	}

	var updateReq integrationtypes.UpdatableServiceConfig[conf]
	err = integrationtypes.UnmarshalJSON(config, &updateReq)
	if err != nil {
		return nil, err
	}

	// Check if config is provided (use any type assertion for nil check with generics)
	if any(updateReq.Config) == nil {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "config is required")
	}

	if err = updateReq.Config.Validate(definition); err != nil {
		return nil, err
	}

	// can only update config for a connected cloud account id
	_, err = b.AccountsRepo.GetConnectedCloudAccount(
		ctx, orgID.String(), b.GetName().String(), updateReq.CloudAccountId,
	)
	if err != nil {
		return nil, err
	}

	serviceConfigBytes, err := integrationtypes.MarshalJSON(&updateReq.Config)
	if err != nil {
		return nil, err
	}

	updatedConfigBytes, err := b.ServiceConfigRepo.Upsert(
		ctx, orgID.String(), b.GetName().String(), updateReq.CloudAccountId, serviceId, serviceConfigBytes,
	)
	if err != nil {
		return nil, err
	}

	// Decode what was actually stored so the caller sees the persisted state.
	var updatedConfig conf
	err = integrationtypes.UnmarshalJSON(updatedConfigBytes, &updatedConfig)
	if err != nil {
		return nil, err
	}

	return &integrationtypes.UpdatableServiceConfigRes{
		ServiceId: serviceId,
		Config:    updatedConfig,
	}, nil
}
|
||||||
@@ -1,43 +0,0 @@
|
|||||||
package cloudintegrations
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// CodeInvalidCloudRegion is returned when a region is not a known region
	// for the cloud provider (see ValidAWSRegions).
	CodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
	// CodeMismatchCloudProvider is returned when an operation targets a
	// different cloud provider than the one a record belongs to.
	CodeMismatchCloudProvider = errors.MustNewCode("cloud_provider_mismatch")
)
|
|
||||||
|
|
||||||
// List of all valid cloud regions on Amazon Web Services
// used for membership checks when validating region input.
var ValidAWSRegions = map[string]bool{
	"af-south-1":     true, // Africa (Cape Town).
	"ap-east-1":      true, // Asia Pacific (Hong Kong).
	"ap-northeast-1": true, // Asia Pacific (Tokyo).
	"ap-northeast-2": true, // Asia Pacific (Seoul).
	"ap-northeast-3": true, // Asia Pacific (Osaka).
	"ap-south-1":     true, // Asia Pacific (Mumbai).
	"ap-south-2":     true, // Asia Pacific (Hyderabad).
	"ap-southeast-1": true, // Asia Pacific (Singapore).
	"ap-southeast-2": true, // Asia Pacific (Sydney).
	"ap-southeast-3": true, // Asia Pacific (Jakarta).
	"ap-southeast-4": true, // Asia Pacific (Melbourne).
	"ca-central-1":   true, // Canada (Central).
	"ca-west-1":      true, // Canada West (Calgary).
	"eu-central-1":   true, // Europe (Frankfurt).
	"eu-central-2":   true, // Europe (Zurich).
	"eu-north-1":     true, // Europe (Stockholm).
	"eu-south-1":     true, // Europe (Milan).
	"eu-south-2":     true, // Europe (Spain).
	"eu-west-1":      true, // Europe (Ireland).
	"eu-west-2":      true, // Europe (London).
	"eu-west-3":      true, // Europe (Paris).
	"il-central-1":   true, // Israel (Tel Aviv).
	"me-central-1":   true, // Middle East (UAE).
	"me-south-1":     true, // Middle East (Bahrain).
	"sa-east-1":      true, // South America (Sao Paulo).
	"us-east-1":      true, // US East (N. Virginia).
	"us-east-2":      true, // US East (Ohio).
	"us-west-1":      true, // US West (N. California).
	"us-west-2":      true, // US West (Oregon).
}
|
|
||||||
@@ -1,624 +0,0 @@
|
|||||||
package cloudintegrations
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"slices"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
|
||||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
|
||||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
|
||||||
"golang.org/x/exp/maps"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SupportedCloudProviders lists the provider names accepted by
// validateCloudProviderName.
var SupportedCloudProviders = []string{
	"aws",
}
|
|
||||||
|
|
||||||
// validateCloudProviderName returns a BadRequest error when name is not one
// of SupportedCloudProviders, nil otherwise.
func validateCloudProviderName(name string) *model.ApiError {
	if !slices.Contains(SupportedCloudProviders, name) {
		return model.BadRequest(fmt.Errorf("invalid cloud provider: %s", name))
	}
	return nil
}
|
|
||||||
|
|
||||||
// Controller coordinates cloud-integration operations over the account and
// service-config repositories.
type Controller struct {
	accountsRepo      cloudProviderAccountsRepository
	serviceConfigRepo ServiceConfigDatabase
}
|
|
||||||
|
|
||||||
// NewController constructs a Controller backed by repositories created from
// the given SQL store.
func NewController(sqlStore sqlstore.SQLStore) (*Controller, error) {
	accountsRepo, err := newCloudProviderAccountsRepository(sqlStore)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
	}

	serviceConfigRepo, err := newServiceConfigRepository(sqlStore)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
	}

	return &Controller{
		accountsRepo:      accountsRepo,
		serviceConfigRepo: serviceConfigRepo,
	}, nil
}
|
|
||||||
|
|
||||||
// ConnectedAccountsListResponse is the API payload for listing connected
// cloud accounts.
type ConnectedAccountsListResponse struct {
	Accounts []types.Account `json:"accounts"`
}
|
|
||||||
|
|
||||||
// ListConnectedAccounts returns the connected accounts for the given org and
// cloud provider, after validating the provider name.
func (c *Controller) ListConnectedAccounts(ctx context.Context, orgId string, cloudProvider string) (
	*ConnectedAccountsListResponse, *model.ApiError,
) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgId, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
	}

	// Non-nil empty slice so the JSON field encodes as [] rather than null.
	connectedAccounts := []types.Account{}
	for _, a := range accountRecords {
		connectedAccounts = append(connectedAccounts, a.Account())
	}

	return &ConnectedAccountsListResponse{
		Accounts: connectedAccounts,
	}, nil
}
|
|
||||||
|
|
||||||
// GenerateConnectionUrlRequest is the request payload for generating a cloud
// account connection URL.
type GenerateConnectionUrlRequest struct {
	// Optional. To be specified for updates.
	AccountId *string `json:"account_id,omitempty"`

	AccountConfig types.AccountConfig `json:"account_config"`

	AgentConfig SigNozAgentConfig `json:"agent_config"`
}
|
|
||||||
|
|
||||||
// SigNozAgentConfig carries the parameters baked into the agent's
// CloudFormation quick-create URL.
type SigNozAgentConfig struct {
	// The region in which SigNoz agent should be installed.
	Region string `json:"region"`

	IngestionUrl string `json:"ingestion_url"`
	IngestionKey string `json:"ingestion_key"`
	SigNozAPIUrl string `json:"signoz_api_url"`
	SigNozAPIKey string `json:"signoz_api_key"`

	// Optional agent version; a default is applied when empty.
	Version string `json:"version,omitempty"`
}
|
|
||||||
|
|
||||||
// GenerateConnectionUrlResponse returns the upserted account id and the
// provider console URL the user should open to finish connecting.
type GenerateConnectionUrlResponse struct {
	AccountId     string `json:"account_id"`
	ConnectionUrl string `json:"connection_url"`
}
|
|
||||||
|
|
||||||
// GenerateConnectionUrl upserts the account record and builds an AWS
// CloudFormation quick-create URL pre-filled with the SigNoz agent
// parameters. Only the aws provider is supported.
func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cloudProvider string, req GenerateConnectionUrlRequest) (*GenerateConnectionUrlResponse, *model.ApiError) {
	// Account connection with a simple connection URL may not be available for all providers.
	if cloudProvider != "aws" {
		return nil, model.BadRequest(fmt.Errorf("unsupported cloud provider: %s", cloudProvider))
	}

	account, apiErr := c.accountsRepo.upsert(
		ctx, orgId, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
	}

	// Default agent version unless the request pins one.
	agentVersion := "v0.0.8"
	if req.AgentConfig.Version != "" {
		agentVersion = req.AgentConfig.Version
	}

	connectionUrl := fmt.Sprintf(
		"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?",
		req.AgentConfig.Region, req.AgentConfig.Region,
	)

	// NOTE(review): map iteration order is random, so the query parameters
	// appear in a non-deterministic order — presumably fine for the console,
	// but verify if the URL is ever compared byte-for-byte.
	for qp, value := range map[string]string{
		"param_SigNozIntegrationAgentVersion": agentVersion,
		"param_SigNozApiUrl":                  req.AgentConfig.SigNozAPIUrl,
		"param_SigNozApiKey":                  req.AgentConfig.SigNozAPIKey,
		"param_SigNozAccountId":               account.ID.StringValue(),
		"param_IngestionUrl":                  req.AgentConfig.IngestionUrl,
		"param_IngestionKey":                  req.AgentConfig.IngestionKey,
		"stackName":                           "signoz-integration",
		"templateURL": fmt.Sprintf(
			"https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json",
			agentVersion,
		),
	} {
		connectionUrl += fmt.Sprintf("&%s=%s", qp, url.QueryEscape(value))
	}

	return &GenerateConnectionUrlResponse{
		AccountId:     account.ID.StringValue(),
		ConnectionUrl: connectionUrl,
	}, nil
}
|
|
||||||
|
|
||||||
// AccountStatusResponse is the API payload describing a single cloud
// account's connection status.
type AccountStatusResponse struct {
	Id             string              `json:"id"`
	CloudAccountId *string             `json:"cloud_account_id,omitempty"`
	Status         types.AccountStatus `json:"status"`
}
|
|
||||||
|
|
||||||
// GetAccountStatus returns the status of the account identified by accountId
// for the given org and provider, after validating the provider name.
func (c *Controller) GetAccountStatus(ctx context.Context, orgId string, cloudProvider string, accountId string) (
	*AccountStatusResponse, *model.ApiError,
) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
	if apiErr != nil {
		return nil, apiErr
	}

	resp := AccountStatusResponse{
		Id:             account.ID.StringValue(),
		CloudAccountId: account.AccountID,
		Status:         account.Status(),
	}

	return &resp, nil
}
|
|
||||||
|
|
||||||
// AgentCheckInRequest is the payload the cloud agent sends on check-in.
type AgentCheckInRequest struct {
	ID        string `json:"account_id"`
	AccountID string `json:"cloud_account_id"`
	// Arbitrary cloud specific Agent data
	Data map[string]any `json:"data,omitempty"`
}
|
|
||||||
|
|
||||||
// AgentCheckInResponse is returned to the agent on check-in, including the
// integration config the agent should apply.
type AgentCheckInResponse struct {
	AccountId      string     `json:"account_id"`
	CloudAccountId string     `json:"cloud_account_id"`
	RemovedAt      *time.Time `json:"removed_at"`

	IntegrationConfig IntegrationConfigForAgent `json:"integration_config"`
}
|
|
||||||
|
|
||||||
// IntegrationConfigForAgent is the configuration consumed by the cloud agent:
// which regions to collect from and the compiled telemetry strategy.
type IntegrationConfigForAgent struct {
	EnabledRegions []string `json:"enabled_regions"`

	TelemetryCollectionStrategy *CompiledCollectionStrategy `json:"telemetry,omitempty"`
}
|
|
||||||
|
|
||||||
// CheckInAsAgent handles an agent check-in: validates account/cloud-account id
// consistency, records the agent report, and compiles the telemetry
// collection strategy from the configured services into the integration
// config returned to the agent.
func (c *Controller) CheckInAsAgent(ctx context.Context, orgId string, cloudProvider string, req AgentCheckInRequest) (*AgentCheckInResponse, error) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	// NOTE(review): apiErr from this call and from getConnectedCloudAccount
	// below is never checked before existingAccount is used — presumably the
	// repo returns nil records on error; confirm that contract.
	existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
	if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in with new %s account id %s for account %s with existing %s id %s",
			cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
		))
	}

	existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
	if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in to %s account %s with id %s. already connected with id %s",
			cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
		))
	}

	// Record this check-in (timestamp + arbitrary agent-supplied data).
	agentReport := types.AgentReport{
		TimestampMillis: time.Now().UnixMilli(),
		Data:            req.Data,
	}

	account, apiErr := c.accountsRepo.upsert(
		ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
	}

	// prepare and return integration config to be consumed by agent
	compiledStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't init telemetry collection strategy: %w", err,
		))
	}

	agentConfig := IntegrationConfigForAgent{
		EnabledRegions:              []string{},
		TelemetryCollectionStrategy: compiledStrategy,
	}

	if account.Config != nil && account.Config.EnabledRegions != nil {
		agentConfig.EnabledRegions = account.Config.EnabledRegions
	}

	services, err := services.Map(cloudProvider)
	if err != nil {
		return nil, err
	}

	svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
		ctx, orgId, account.ID.StringValue(),
	)
	if apiErr != nil {
		return nil, model.WrapApiError(
			apiErr, "couldn't get service configs for cloud account",
		)
	}

	// accumulate config in a fixed order to ensure same config generated across runs
	configuredServices := maps.Keys(svcConfigs)
	slices.Sort(configuredServices)

	for _, svcType := range configuredServices {
		definition, ok := services[svcType]
		if !ok {
			// Skip configs for services this provider no longer defines.
			continue
		}
		config := svcConfigs[svcType]

		err := AddServiceStrategy(svcType, compiledStrategy, definition.Strategy, config)
		if err != nil {
			return nil, err
		}
	}

	return &AgentCheckInResponse{
		AccountId:         account.ID.StringValue(),
		CloudAccountId:    *account.AccountID,
		RemovedAt:         account.RemovedAt,
		IntegrationConfig: agentConfig,
	}, nil
}
|
|
||||||
|
|
||||||
// UpdateAccountConfigRequest is the payload for updating a cloud
// account's configuration.
type UpdateAccountConfigRequest struct {
	Config types.AccountConfig `json:"config"`
}
|
|
||||||
|
|
||||||
func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, cloudProvider string, accountId string, req UpdateAccountConfigRequest) (*types.Account, *model.ApiError) {
|
|
||||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
|
||||||
return nil, apiErr
|
|
||||||
}
|
|
||||||
|
|
||||||
accountRecord, apiErr := c.accountsRepo.upsert(
|
|
||||||
ctx, orgId, cloudProvider, &accountId, &req.Config, nil, nil, nil,
|
|
||||||
)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
|
|
||||||
}
|
|
||||||
|
|
||||||
account := accountRecord.Account()
|
|
||||||
|
|
||||||
return &account, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Controller) DisconnectAccount(ctx context.Context, orgId string, cloudProvider string, accountId string) (*types.CloudIntegration, *model.ApiError) {
|
|
||||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
|
||||||
return nil, apiErr
|
|
||||||
}
|
|
||||||
|
|
||||||
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
|
|
||||||
}
|
|
||||||
|
|
||||||
tsNow := time.Now()
|
|
||||||
account, apiErr = c.accountsRepo.upsert(
|
|
||||||
ctx, orgId, cloudProvider, &accountId, nil, nil, nil, &tsNow,
|
|
||||||
)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
|
|
||||||
}
|
|
||||||
|
|
||||||
return account, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListServicesResponse is the response body for listing cloud services and
// their per-account configuration summaries.
type ListServicesResponse struct {
	Services []ServiceSummary `json:"services"`
}
|
|
||||||
|
|
||||||
// ListServices returns summaries for all services of a cloud provider.
// When cloudAccountId is given, each summary also carries that connected
// account's stored config for the service (nil when unconfigured).
func (c *Controller) ListServices(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	cloudAccountId *string,
) (*ListServicesResponse, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	definitions, apiErr := services.List(cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
	}

	// Per-service configs, keyed by service id; stays empty when no
	// account id was provided.
	svcConfigs := map[string]*types.CloudServiceConfig{}
	if cloudAccountId != nil {
		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
			ctx, orgID, cloudProvider, *cloudAccountId,
		)
		if apiErr != nil {
			return nil, model.WrapApiError(apiErr, "couldn't get active account")
		}
		svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
			ctx, orgID, activeAccount.ID.StringValue(),
		)
		if apiErr != nil {
			return nil, model.WrapApiError(
				apiErr, "couldn't get service configs for cloud account",
			)
		}
	}

	summaries := []ServiceSummary{}
	for _, def := range definitions {
		summary := ServiceSummary{
			Metadata: def.Metadata,
		}
		// Missing key yields nil — unconfigured services get Config == nil.
		summary.Config = svcConfigs[summary.Id]

		summaries = append(summaries, summary)
	}

	return &ListServicesResponse{
		Services: summaries,
	}, nil
}
|
|
||||||
|
|
||||||
// GetServiceDetails returns the definition for a single cloud service,
// plus (when cloudAccountId is given) the stored config for that account.
// Dashboard URLs are set or cleared on the definition depending on whether
// metrics are enabled for the service.
func (c *Controller) GetServiceDetails(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	serviceId string,
	cloudAccountId *string,
) (*ServiceDetails, error) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	definition, err := services.GetServiceDefinition(cloudProvider, serviceId)
	if err != nil {
		return nil, err
	}

	details := ServiceDetails{
		Definition: *definition,
	}

	if cloudAccountId != nil {

		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
			ctx, orgID, cloudProvider, *cloudAccountId,
		)
		if apiErr != nil {
			return nil, model.WrapApiError(apiErr, "couldn't get active account")
		}

		config, apiErr := c.serviceConfigRepo.get(
			ctx, orgID, activeAccount.ID.StringValue(), serviceId,
		)
		// Not-found is fine: the service simply has no config yet.
		if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
			return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
		}

		if config != nil {
			details.Config = config

			enabled := false
			if config.Metrics != nil && config.Metrics.Enabled {
				enabled = true
			}

			// add links to service dashboards, making them clickable.
			// NOTE(review): this mutates definition.Assets.Dashboards in
			// place — the URL update is visible through the pointer returned
			// by GetServiceDefinition; confirm the definition is not shared
			// read-only state elsewhere.
			for i, d := range definition.Assets.Dashboards {
				dashboardUuid := c.dashboardUuid(
					cloudProvider, serviceId, d.Id,
				)
				if enabled {
					definition.Assets.Dashboards[i].Url = fmt.Sprintf("/dashboard/%s", dashboardUuid)
				} else {
					definition.Assets.Dashboards[i].Url = "" // to unset the in-memory URL if enabled once and disabled afterwards
				}
			}
		}
	}

	return &details, nil
}
|
|
||||||
|
|
||||||
// UpdateServiceConfigRequest is the payload for updating a cloud service's
// configuration for a specific connected cloud account.
type UpdateServiceConfigRequest struct {
	// CloudAccountId identifies the connected cloud account the config
	// applies to.
	CloudAccountId string `json:"cloud_account_id"`

	Config types.CloudServiceConfig `json:"config"`
}
|
|
||||||
|
|
||||||
func (u *UpdateServiceConfigRequest) Validate(def *services.Definition) error {
|
|
||||||
if def.Id != services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
|
|
||||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", services.S3Sync)
|
|
||||||
} else if def.Id == services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
|
|
||||||
for region := range u.Config.Logs.S3Buckets {
|
|
||||||
if _, found := ValidAWSRegions[region]; !found {
|
|
||||||
return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateServiceConfigResponse echoes back the service id and the config
// that was stored by UpdateServiceConfig.
type UpdateServiceConfigResponse struct {
	Id     string                   `json:"id"`
	Config types.CloudServiceConfig `json:"config"`
}
|
|
||||||
|
|
||||||
// UpdateServiceConfig validates and stores a service config for a connected
// cloud account, returning the stored config.
func (c *Controller) UpdateServiceConfig(
	ctx context.Context,
	orgID string,
	cloudProvider string,
	serviceType string,
	req *UpdateServiceConfigRequest,
) (*UpdateServiceConfigResponse, error) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	// can only update config for a valid service.
	definition, err := services.GetServiceDefinition(cloudProvider, serviceType)
	if err != nil {
		return nil, err
	}

	if err := req.Validate(definition); err != nil {
		return nil, err
	}

	// can only update config for a connected cloud account id
	_, apiErr := c.accountsRepo.getConnectedCloudAccount(
		ctx, orgID, cloudProvider, req.CloudAccountId,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
	}

	updatedConfig, apiErr := c.serviceConfigRepo.upsert(
		ctx, orgID, cloudProvider, req.CloudAccountId, serviceType, req.Config,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't update service config")
	}

	return &UpdateServiceConfigResponse{
		Id:     serviceType,
		Config: *updatedConfig,
	}, nil
}
|
|
||||||
|
|
||||||
// All dashboards that are available based on cloud integrations configuration
|
|
||||||
// across all cloud providers
|
|
||||||
func (c *Controller) AvailableDashboards(ctx context.Context, orgId valuer.UUID) ([]*dashboardtypes.Dashboard, *model.ApiError) {
|
|
||||||
allDashboards := []*dashboardtypes.Dashboard{}
|
|
||||||
|
|
||||||
for _, provider := range []string{"aws"} {
|
|
||||||
providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, provider)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, model.WrapApiError(
|
|
||||||
apiErr, fmt.Sprintf("couldn't get available dashboards for %s", provider),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
allDashboards = append(allDashboards, providerDashboards...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return allDashboards, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AvailableDashboardsForCloudProvider returns the dashboards available for a
// single cloud provider, based on which services have metrics enabled on any
// connected account for the org.
func (c *Controller) AvailableDashboardsForCloudProvider(ctx context.Context, orgID valuer.UUID, cloudProvider string) ([]*dashboardtypes.Dashboard, *model.ApiError) {
	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgID.StringValue(), cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list connected cloud accounts")
	}

	// for v0, service dashboards are only available when metrics are enabled.
	// Maps service id -> creation time of an account that has metrics enabled
	// for it (used as the dashboard's created/updated timestamp).
	servicesWithAvailableMetrics := map[string]*time.Time{}

	for _, ar := range accountRecords {
		if ar.AccountID != nil {
			configsBySvcId, apiErr := c.serviceConfigRepo.getAllForAccount(
				ctx, orgID.StringValue(), ar.ID.StringValue(),
			)
			if apiErr != nil {
				return nil, apiErr
			}

			for svcId, config := range configsBySvcId {
				if config.Metrics != nil && config.Metrics.Enabled {
					// NOTE(review): takes the address of the loop variable's
					// field; safe per-iteration on Go 1.22+, confirm the
					// module's Go version if this is ever backported.
					servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
				}
			}
		}
	}

	allServices, apiErr := services.List(cloudProvider)
	if apiErr != nil {
		return nil, apiErr
	}

	svcDashboards := []*dashboardtypes.Dashboard{}
	for _, svc := range allServices {
		serviceDashboardsCreatedAt := servicesWithAvailableMetrics[svc.Id]
		if serviceDashboardsCreatedAt != nil {
			for _, d := range svc.Assets.Dashboards {
				author := fmt.Sprintf("%s-integration", cloudProvider)
				svcDashboards = append(svcDashboards, &dashboardtypes.Dashboard{
					ID:     c.dashboardUuid(cloudProvider, svc.Id, d.Id),
					Locked: true,
					Data:   *d.Definition,
					TimeAuditable: types.TimeAuditable{
						CreatedAt: *serviceDashboardsCreatedAt,
						UpdatedAt: *serviceDashboardsCreatedAt,
					},
					UserAuditable: types.UserAuditable{
						CreatedBy: author,
						UpdatedBy: author,
					},
					OrgID: orgID,
				})
			}
			// NOTE(review): each svc.Id appears once in allServices, so this
			// reset looks redundant — presumably defensive against duplicate
			// entries; confirm before removing.
			servicesWithAvailableMetrics[svc.Id] = nil
		}
	}

	return svcDashboards, nil
}
|
|
||||||
func (c *Controller) GetDashboardById(ctx context.Context, orgId valuer.UUID, dashboardUuid string) (*dashboardtypes.Dashboard, *model.ApiError) {
|
|
||||||
cloudProvider, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, apiErr
|
|
||||||
}
|
|
||||||
|
|
||||||
allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, cloudProvider)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, model.WrapApiError(apiErr, "couldn't list available dashboards")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, d := range allDashboards {
|
|
||||||
if d.ID == dashboardUuid {
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, model.NotFoundError(fmt.Errorf("couldn't find dashboard with uuid: %s", dashboardUuid))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Controller) dashboardUuid(
|
|
||||||
cloudProvider string, svcId string, dashboardId string,
|
|
||||||
) string {
|
|
||||||
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Controller) parseDashboardUuid(dashboardUuid string) (cloudProvider string, svcId string, dashboardId string, apiErr *model.ApiError) {
|
|
||||||
parts := strings.SplitN(dashboardUuid, "--", 4)
|
|
||||||
if len(parts) != 4 || parts[0] != "cloud-integration" {
|
|
||||||
return "", "", "", model.BadRequest(fmt.Errorf("invalid cloud integration dashboard id"))
|
|
||||||
}
|
|
||||||
|
|
||||||
return parts[1], parts[2], parts[3], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Controller) IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
|
|
||||||
_, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
|
|
||||||
return apiErr == nil
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,331 @@
|
|||||||
|
package implawsprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net/url"
|
||||||
|
"slices"
|
||||||
|
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/querier"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/baseprovider"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||||
|
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// CodeInvalidAWSRegion is the error code returned when a supplied AWS
	// region is not in the known-regions set.
	CodeInvalidAWSRegion = errors.MustNewCode("invalid_aws_region")
)
|
||||||
|
|
||||||
|
// awsProvider is the AWS implementation of the cloud provider integration,
// built on the generic BaseCloudProvider specialized for AWS definition and
// service-config types.
type awsProvider struct {
	baseprovider.BaseCloudProvider[*integrationtypes.AWSDefinition, *integrationtypes.AWSServiceConfig]
}
|
||||||
|
|
||||||
|
func NewAWSCloudProvider(
|
||||||
|
logger *slog.Logger,
|
||||||
|
accountsRepo integrationstore.CloudProviderAccountsRepository,
|
||||||
|
serviceConfigRepo integrationstore.ServiceConfigDatabase,
|
||||||
|
querier querier.Querier,
|
||||||
|
) (integrationtypes.CloudProvider, error) {
|
||||||
|
serviceDefinitions, err := services.NewAWSCloudProviderServices()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &awsProvider{
|
||||||
|
BaseCloudProvider: baseprovider.BaseCloudProvider[*integrationtypes.AWSDefinition, *integrationtypes.AWSServiceConfig]{
|
||||||
|
Logger: logger,
|
||||||
|
Querier: querier,
|
||||||
|
AccountsRepo: accountsRepo,
|
||||||
|
ServiceConfigRepo: serviceConfigRepo,
|
||||||
|
ServiceDefinitions: serviceDefinitions,
|
||||||
|
ProviderType: integrationtypes.CloudProviderAWS,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *awsProvider) AgentCheckIn(ctx context.Context, req *integrationtypes.PostableAgentCheckInPayload) (any, error) {
|
||||||
|
return baseprovider.AgentCheckIn(
|
||||||
|
&a.BaseCloudProvider,
|
||||||
|
ctx,
|
||||||
|
req,
|
||||||
|
a.getAWSAgentConfig,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAWSAgentConfig builds the integration config returned to an AWS agent on
// check-in: enabled regions plus the telemetry collection strategy compiled
// from every configured service for the account.
func (a *awsProvider) getAWSAgentConfig(ctx context.Context, account *integrationtypes.CloudIntegration) (*integrationtypes.AWSAgentIntegrationConfig, error) {
	// prepare and return integration config to be consumed by agent
	agentConfig := &integrationtypes.AWSAgentIntegrationConfig{
		EnabledRegions: []string{},
		TelemetryCollectionStrategy: &integrationtypes.AWSCollectionStrategy{
			Metrics:   &integrationtypes.AWSMetricsStrategy{},
			Logs:      &integrationtypes.AWSLogsStrategy{},
			S3Buckets: map[string][]string{},
		},
	}

	accountConfig := new(integrationtypes.AWSAccountConfig)
	err := integrationtypes.UnmarshalJSON([]byte(account.Config), accountConfig)
	if err != nil {
		return nil, err
	}

	if accountConfig.EnabledRegions != nil {
		agentConfig.EnabledRegions = accountConfig.EnabledRegions
	}

	svcConfigs, err := a.ServiceConfigRepo.GetAllForAccount(
		ctx, account.OrgID, account.ID.StringValue(),
	)
	if err != nil {
		return nil, err
	}

	// accumulate config in a fixed order to ensure same config generated across runs
	configuredServices := maps.Keys(svcConfigs)
	slices.Sort(configuredServices)

	for _, svcType := range configuredServices {
		// Unknown service types and undecodable configs are skipped rather
		// than failing the whole check-in.
		definition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, svcType)
		if err != nil {
			continue
		}
		config := svcConfigs[svcType]

		serviceConfig := new(integrationtypes.AWSServiceConfig)
		err = integrationtypes.UnmarshalJSON(config, serviceConfig)
		if err != nil {
			continue
		}

		if serviceConfig.IsLogsEnabled() {
			if svcType == integrationtypes.S3Sync {
				// S3 bucket sync; No cloudwatch logs are appended for this service type;
				// Though definition is populated with a custom cloudwatch group that helps in calculating logs connection status
				agentConfig.TelemetryCollectionStrategy.S3Buckets = serviceConfig.Logs.S3Buckets
			} else if definition.Strategy.Logs != nil { // services that includes a logs subscription
				agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions = append(
					agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions,
					definition.Strategy.Logs.Subscriptions...,
				)
			}
		}

		if serviceConfig.IsMetricsEnabled() && definition.Strategy.Metrics != nil {
			agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters = append(
				agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters,
				definition.Strategy.Metrics.StreamFilters...,
			)
		}
	}

	return agentConfig, nil
}
|
||||||
|
|
||||||
|
// ListServices returns title-sorted summaries of all AWS service
// definitions; when cloudAccountID refers to a connected account, each
// summary also carries that account's stored config for the service.
func (a *awsProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
	svcConfigs := make(map[string]*integrationtypes.AWSServiceConfig)
	if cloudAccountID != nil {
		activeAccount, err := a.AccountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
		if err != nil {
			return nil, err
		}

		// NOTE(review): uses ID.String() where sibling code uses
		// ID.StringValue() — confirm both produce the same key format.
		serviceConfigs, err := a.ServiceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.String())
		if err != nil {
			return nil, err
		}

		for svcType, config := range serviceConfigs {
			serviceConfig := new(integrationtypes.AWSServiceConfig)
			err = integrationtypes.UnmarshalJSON(config, serviceConfig)
			if err != nil {
				return nil, err
			}
			svcConfigs[svcType] = serviceConfig
		}
	}

	summaries := make([]integrationtypes.AWSServiceSummary, 0)

	definitions, err := a.ServiceDefinitions.ListServiceDefinitions(ctx)
	if err != nil {
		return nil, err
	}

	for _, def := range definitions {
		summary := integrationtypes.AWSServiceSummary{
			DefinitionMetadata: def.DefinitionMetadata,
			Config:             nil,
		}

		// Missing key yields nil — unconfigured services keep Config == nil.
		summary.Config = svcConfigs[summary.Id]

		summaries = append(summaries, summary)
	}

	// Sort by title for a stable, user-friendly ordering.
	slices.SortFunc(summaries, func(a, b integrationtypes.AWSServiceSummary) int {
		if a.DefinitionMetadata.Title < b.DefinitionMetadata.Title {
			return -1
		}
		if a.DefinitionMetadata.Title > b.DefinitionMetadata.Title {
			return 1
		}
		return 0
	})

	return &integrationtypes.GettableAWSServices{
		Services: summaries,
	}, nil
}
|
||||||
|
|
||||||
|
// GetServiceDetails returns the definition for one AWS service, plus — when a
// cloud account id is supplied and config exists — the stored config and the
// computed telemetry connection status.
func (a *awsProvider) GetServiceDetails(ctx context.Context, req *integrationtypes.GetServiceDetailsReq) (any, error) {
	details := new(integrationtypes.GettableAWSServiceDetails)

	awsDefinition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
	if err != nil {
		return nil, err
	}

	details.Definition = *awsDefinition
	// Without an account there is no config/connection status to compute.
	if req.CloudAccountID == nil {
		return details, nil
	}

	config, err := a.GetServiceConfig(ctx, awsDefinition, req.OrgID, req.ServiceId, *req.CloudAccountID)
	if err != nil {
		return nil, err
	}

	// No stored config: return the bare definition.
	if config == nil {
		return details, nil
	}

	details.Config = config

	isMetricsEnabled := config.IsMetricsEnabled()
	isLogsEnabled := config.IsLogsEnabled()

	connectionStatus, err := a.GetServiceConnectionStatus(
		ctx,
		*req.CloudAccountID,
		req.OrgID,
		awsDefinition,
		isMetricsEnabled,
		isLogsEnabled,
	)
	if err != nil {
		return nil, err
	}

	details.ConnectionStatus = connectionStatus

	return details, nil
}
|
||||||
|
|
||||||
|
// GenerateConnectionArtifact validates the requested account config, upserts
// a new/existing account record, and returns an AWS CloudFormation
// quick-create console URL the user can open to deploy the SigNoz
// integration agent stack.
func (a *awsProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationtypes.PostableConnectionArtifact) (any, error) {
	connection := new(integrationtypes.PostableAWSConnectionUrl)

	err := integrationtypes.UnmarshalJSON(req.Data, connection)
	if err != nil {
		return nil, err
	}

	// Reject any enabled region that is not a known AWS region.
	if connection.AccountConfig != nil {
		for _, region := range connection.AccountConfig.EnabledRegions {
			if integrationtypes.ValidAWSRegions[region] {
				continue
			}

			return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
		}
	}

	config, err := integrationtypes.MarshalJSON(connection.AccountConfig)
	if err != nil {
		return nil, err
	}

	account, err := a.AccountsRepo.Upsert(
		ctx, req.OrgID, integrationtypes.CloudProviderAWS.String(), nil, config,
		nil, nil, nil,
	)
	if err != nil {
		return nil, err
	}

	// Fall back to a pinned agent version when the caller didn't specify one.
	agentVersion := "v0.0.8"
	if connection.AgentConfig.Version != "" {
		agentVersion = connection.AgentConfig.Version
	}

	baseURL := fmt.Sprintf("https://%s.console.aws.amazon.com/cloudformation/home",
		connection.AgentConfig.Region)
	// baseURL is built from a fixed template, so parse errors are not expected.
	u, _ := url.Parse(baseURL)

	// First query set: only the console region; the quick-create path lives
	// in the URL fragment.
	q := u.Query()
	q.Set("region", connection.AgentConfig.Region)
	u.Fragment = "/stacks/quickcreate"

	u.RawQuery = q.Encode()

	// Second query set: stack parameters, appended separately below in the
	// "?&" form the AWS console expects.
	q = u.Query()
	q.Set("stackName", "signoz-integration")
	q.Set("templateURL", fmt.Sprintf("https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json", agentVersion))
	q.Set("param_SigNozIntegrationAgentVersion", agentVersion)
	q.Set("param_SigNozApiUrl", connection.AgentConfig.SigNozAPIUrl)
	q.Set("param_SigNozApiKey", connection.AgentConfig.SigNozAPIKey)
	q.Set("param_SigNozAccountId", account.ID.StringValue())
	q.Set("param_IngestionUrl", connection.AgentConfig.IngestionUrl)
	q.Set("param_IngestionKey", connection.AgentConfig.IngestionKey)

	return &integrationtypes.GettableAWSConnectionUrl{
		AccountId:     account.ID.StringValue(),
		ConnectionUrl: u.String() + "?&" + q.Encode(), // this format is required by AWS
	}, nil
}
|
||||||
|
|
||||||
|
// UpdateAccountConfig validates and stores a new config for an existing AWS
// account record (the account need not be connected) and returns the updated
// account view.
func (a *awsProvider) UpdateAccountConfig(ctx context.Context, orgId valuer.UUID, accountId string, configBytes []byte) (any, error) {
	config := new(integrationtypes.UpdatableAWSAccountConfig)

	err := integrationtypes.UnmarshalJSON(configBytes, config)
	if err != nil {
		return nil, err
	}

	if config.Config == nil {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "account config can't be null")
	}

	// Reject any enabled region that is not a known AWS region.
	for _, region := range config.Config.EnabledRegions {
		if integrationtypes.ValidAWSRegions[region] {
			continue
		}

		return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
	}

	// account must exist to update config, but it doesn't need to be connected
	_, err = a.AccountsRepo.Get(ctx, orgId.String(), a.GetName().String(), accountId)
	if err != nil {
		return nil, err
	}

	// Re-marshal only the inner config for storage.
	configBytes, err = integrationtypes.MarshalJSON(config.Config)
	if err != nil {
		return nil, err
	}

	accountRecord, err := a.AccountsRepo.Upsert(
		ctx, orgId.String(), a.GetName().String(), &accountId, configBytes, nil, nil, nil,
	)
	if err != nil {
		return nil, err
	}

	return accountRecord.Account(a.GetName()), nil
}
|
||||||
@@ -0,0 +1,368 @@
|
|||||||
|
package implazureprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/querier"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/baseprovider"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// CodeInvalidAzureRegion is the error code returned when a supplied Azure
	// region is not valid.
	CodeInvalidAzureRegion = errors.MustNewCode("invalid_azure_region")
)
|
||||||
|
|
||||||
|
// azureProvider is the Azure implementation of the cloud provider
// integration, built on the generic BaseCloudProvider specialized for Azure
// definition and service-config types.
type azureProvider struct {
	baseprovider.BaseCloudProvider[*integrationtypes.AzureDefinition, *integrationtypes.AzureServiceConfig]
}
|
||||||
|
|
||||||
|
func NewAzureCloudProvider(
|
||||||
|
logger *slog.Logger,
|
||||||
|
accountsRepo store.CloudProviderAccountsRepository,
|
||||||
|
serviceConfigRepo store.ServiceConfigDatabase,
|
||||||
|
querier querier.Querier,
|
||||||
|
) (integrationtypes.CloudProvider, error) {
|
||||||
|
azureServiceDefinitions, err := services.NewAzureCloudProviderServices()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &azureProvider{
|
||||||
|
BaseCloudProvider: baseprovider.BaseCloudProvider[*integrationtypes.AzureDefinition, *integrationtypes.AzureServiceConfig]{
|
||||||
|
Logger: logger,
|
||||||
|
Querier: querier,
|
||||||
|
AccountsRepo: accountsRepo,
|
||||||
|
ServiceConfigRepo: serviceConfigRepo,
|
||||||
|
ServiceDefinitions: azureServiceDefinitions,
|
||||||
|
ProviderType: integrationtypes.CloudProviderAzure,
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *azureProvider) AgentCheckIn(ctx context.Context, req *integrationtypes.PostableAgentCheckInPayload) (any, error) {
|
||||||
|
return baseprovider.AgentCheckIn(
|
||||||
|
&a.BaseCloudProvider,
|
||||||
|
ctx,
|
||||||
|
req,
|
||||||
|
a.getAzureAgentConfig,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *azureProvider) getAzureAgentConfig(ctx context.Context, account *integrationtypes.CloudIntegration) (*integrationtypes.AzureAgentIntegrationConfig, error) {
|
||||||
|
// prepare and return integration config to be consumed by agent
|
||||||
|
agentConfig := &integrationtypes.AzureAgentIntegrationConfig{
|
||||||
|
TelemetryCollectionStrategy: make(map[string]*integrationtypes.AzureCollectionStrategy),
|
||||||
|
}
|
||||||
|
|
||||||
|
accountConfig := new(integrationtypes.AzureAccountConfig)
|
||||||
|
err := integrationtypes.UnmarshalJSON([]byte(account.Config), accountConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if account.Config != "" {
|
||||||
|
agentConfig.DeploymentRegion = accountConfig.DeploymentRegion
|
||||||
|
agentConfig.EnabledResourceGroups = accountConfig.EnabledResourceGroups
|
||||||
|
}
|
||||||
|
|
||||||
|
svcConfigs, err := a.ServiceConfigRepo.GetAllForAccount(
|
||||||
|
ctx, account.OrgID, account.ID.StringValue(),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// accumulate config in a fixed order to ensure same config generated across runs
|
||||||
|
configuredServices := maps.Keys(svcConfigs)
|
||||||
|
slices.Sort(configuredServices)
|
||||||
|
|
||||||
|
for _, svcType := range configuredServices {
|
||||||
|
definition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, svcType)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
config := svcConfigs[svcType]
|
||||||
|
|
||||||
|
serviceConfig := new(integrationtypes.AzureServiceConfig)
|
||||||
|
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics := make([]*integrationtypes.AzureMetricsStrategy, 0)
|
||||||
|
logs := make([]*integrationtypes.AzureLogsStrategy, 0)
|
||||||
|
|
||||||
|
metricsStrategyMap := make(map[string]*integrationtypes.AzureMetricsStrategy)
|
||||||
|
logsStrategyMap := make(map[string]*integrationtypes.AzureLogsStrategy)
|
||||||
|
|
||||||
|
if definition.Strategy != nil && definition.Strategy.Metrics != nil {
|
||||||
|
for _, metric := range definition.Strategy.Metrics {
|
||||||
|
metricsStrategyMap[metric.Name] = metric
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if definition.Strategy != nil && definition.Strategy.Logs != nil {
|
||||||
|
for _, log := range definition.Strategy.Logs {
|
||||||
|
logsStrategyMap[log.Name] = log
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if serviceConfig.Metrics != nil {
|
||||||
|
for _, metric := range serviceConfig.Metrics {
|
||||||
|
if metric.Enabled {
|
||||||
|
metrics = append(metrics, &integrationtypes.AzureMetricsStrategy{
|
||||||
|
CategoryType: metricsStrategyMap[metric.Name].CategoryType,
|
||||||
|
Name: metric.Name,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if serviceConfig.Logs != nil {
|
||||||
|
for _, log := range serviceConfig.Logs {
|
||||||
|
if log.Enabled {
|
||||||
|
logs = append(logs, &integrationtypes.AzureLogsStrategy{
|
||||||
|
CategoryType: logsStrategyMap[log.Name].CategoryType,
|
||||||
|
Name: log.Name,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
strategy := &integrationtypes.AzureCollectionStrategy{
|
||||||
|
Metrics: metrics,
|
||||||
|
Logs: logs,
|
||||||
|
}
|
||||||
|
|
||||||
|
agentConfig.TelemetryCollectionStrategy[svcType] = strategy
|
||||||
|
}
|
||||||
|
|
||||||
|
return agentConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *azureProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
|
||||||
|
svcConfigs := make(map[string]*integrationtypes.AzureServiceConfig)
|
||||||
|
if cloudAccountID != nil {
|
||||||
|
activeAccount, err := a.AccountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceConfigs, err := a.ServiceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.StringValue())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for svcType, config := range serviceConfigs {
|
||||||
|
serviceConfig := new(integrationtypes.AzureServiceConfig)
|
||||||
|
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
svcConfigs[svcType] = serviceConfig
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
summaries := make([]integrationtypes.AzureServiceSummary, 0)
|
||||||
|
|
||||||
|
definitions, err := a.ServiceDefinitions.ListServiceDefinitions(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, def := range definitions {
|
||||||
|
summary := integrationtypes.AzureServiceSummary{
|
||||||
|
DefinitionMetadata: def.DefinitionMetadata,
|
||||||
|
Config: nil,
|
||||||
|
}
|
||||||
|
|
||||||
|
summary.Config = svcConfigs[summary.Id]
|
||||||
|
|
||||||
|
summaries = append(summaries, summary)
|
||||||
|
}
|
||||||
|
|
||||||
|
slices.SortFunc(summaries, func(a, b integrationtypes.AzureServiceSummary) int {
|
||||||
|
if a.DefinitionMetadata.Title < b.DefinitionMetadata.Title {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if a.DefinitionMetadata.Title > b.DefinitionMetadata.Title {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
})
|
||||||
|
|
||||||
|
return &integrationtypes.GettableAzureServices{
|
||||||
|
Services: summaries,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *azureProvider) GetServiceDetails(ctx context.Context, req *integrationtypes.GetServiceDetailsReq) (any, error) {
|
||||||
|
details := new(integrationtypes.GettableAzureServiceDetails)
|
||||||
|
|
||||||
|
azureDefinition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
details.Definition = *azureDefinition
|
||||||
|
if req.CloudAccountID == nil {
|
||||||
|
return details, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := a.GetServiceConfig(ctx, azureDefinition, req.OrgID, req.ServiceId, *req.CloudAccountID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
details.Config = config
|
||||||
|
|
||||||
|
// fill default values for config
|
||||||
|
if details.Config == nil {
|
||||||
|
cfg := new(integrationtypes.AzureServiceConfig)
|
||||||
|
|
||||||
|
logs := make([]*integrationtypes.AzureServiceLogsConfig, 0)
|
||||||
|
if azureDefinition.Strategy != nil && azureDefinition.Strategy.Logs != nil {
|
||||||
|
for _, log := range azureDefinition.Strategy.Logs {
|
||||||
|
logs = append(logs, &integrationtypes.AzureServiceLogsConfig{
|
||||||
|
Enabled: false,
|
||||||
|
Name: log.Name,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics := make([]*integrationtypes.AzureServiceMetricsConfig, 0)
|
||||||
|
if azureDefinition.Strategy != nil && azureDefinition.Strategy.Metrics != nil {
|
||||||
|
for _, metric := range azureDefinition.Strategy.Metrics {
|
||||||
|
metrics = append(metrics, &integrationtypes.AzureServiceMetricsConfig{
|
||||||
|
Enabled: false,
|
||||||
|
Name: metric.Name,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg.Logs = logs
|
||||||
|
cfg.Metrics = metrics
|
||||||
|
|
||||||
|
details.Config = cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
isMetricsEnabled := details.Config != nil && details.Config.IsMetricsEnabled()
|
||||||
|
isLogsEnabled := details.Config != nil && details.Config.IsLogsEnabled()
|
||||||
|
|
||||||
|
connectionStatus, err := a.GetServiceConnectionStatus(
|
||||||
|
ctx,
|
||||||
|
*req.CloudAccountID,
|
||||||
|
req.OrgID,
|
||||||
|
azureDefinition,
|
||||||
|
isMetricsEnabled,
|
||||||
|
isLogsEnabled,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
details.ConnectionStatus = connectionStatus
|
||||||
|
return details, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *azureProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationtypes.PostableConnectionArtifact) (any, error) {
|
||||||
|
connection := new(integrationtypes.PostableAzureConnectionCommand)
|
||||||
|
|
||||||
|
err := integrationtypes.UnmarshalJSON(req.Data, connection)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed unmarshal request data into AWS connection config")
|
||||||
|
}
|
||||||
|
|
||||||
|
// validate connection config
|
||||||
|
if connection.AccountConfig != nil {
|
||||||
|
if !integrationtypes.ValidAzureRegions[connection.AccountConfig.DeploymentRegion] {
|
||||||
|
return nil, errors.NewInvalidInputf(CodeInvalidAzureRegion, "invalid azure region: %s",
|
||||||
|
connection.AccountConfig.DeploymentRegion,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := integrationtypes.MarshalJSON(connection.AccountConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
account, err := a.AccountsRepo.Upsert(
|
||||||
|
ctx, req.OrgID, a.GetName().String(), nil, config,
|
||||||
|
nil, nil, nil,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
agentVersion := "v0.0.8"
|
||||||
|
|
||||||
|
if connection.AgentConfig.Version != "" {
|
||||||
|
agentVersion = connection.AgentConfig.Version
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: improve the command and set url
|
||||||
|
cliCommand := []string{"az", "stack", "sub", "create", "--name", "SigNozIntegration", "--location",
|
||||||
|
connection.AccountConfig.DeploymentRegion, "--template-uri", fmt.Sprintf("<url>%s", agentVersion),
|
||||||
|
"--action-on-unmanage", "deleteAll", "--deny-settings-mode", "denyDelete", "--parameters", fmt.Sprintf("rgName=%s", "signoz-integration-rg"),
|
||||||
|
fmt.Sprintf("rgLocation=%s", connection.AccountConfig.DeploymentRegion)}
|
||||||
|
|
||||||
|
return &integrationtypes.GettableAzureConnectionCommand{
|
||||||
|
AccountId: account.ID.String(),
|
||||||
|
AzureShellConnectionCommand: "az create",
|
||||||
|
AzureCliConnectionCommand: strings.Join(cliCommand, " "),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *azureProvider) UpdateAccountConfig(ctx context.Context, orgId valuer.UUID, accountId string, configBytes []byte) (any, error) {
|
||||||
|
config := new(integrationtypes.UpdatableAzureAccountConfig)
|
||||||
|
|
||||||
|
err := integrationtypes.UnmarshalJSON(configBytes, config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(config.Config.EnabledResourceGroups) < 1 {
|
||||||
|
return nil, errors.NewInvalidInputf(CodeInvalidAzureRegion, "azure region and resource groups must be provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
//for azure, preserve deployment region if already set
|
||||||
|
account, err := a.AccountsRepo.Get(ctx, orgId.String(), a.GetName().String(), accountId)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
storedConfig := new(integrationtypes.AzureAccountConfig)
|
||||||
|
err = integrationtypes.UnmarshalJSON([]byte(account.Config), storedConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if account.Config != "" {
|
||||||
|
config.Config.DeploymentRegion = storedConfig.DeploymentRegion
|
||||||
|
}
|
||||||
|
|
||||||
|
configBytes, err = integrationtypes.MarshalJSON(config.Config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
accountRecord, err := a.AccountsRepo.Upsert(
|
||||||
|
ctx, orgId.String(), a.GetName().String(), &accountId, configBytes, nil, nil, nil,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return accountRecord.Account(a.GetName()), nil
|
||||||
|
}
|
||||||
@@ -1,94 +1 @@
|
|||||||
package cloudintegrations
|
package cloudintegrations
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ServiceSummary struct {
|
|
||||||
services.Metadata
|
|
||||||
|
|
||||||
Config *types.CloudServiceConfig `json:"config"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ServiceDetails struct {
|
|
||||||
services.Definition
|
|
||||||
|
|
||||||
Config *types.CloudServiceConfig `json:"config"`
|
|
||||||
ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type AccountStatus struct {
|
|
||||||
Integration AccountIntegrationStatus `json:"integration"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type AccountIntegrationStatus struct {
|
|
||||||
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type LogsConfig struct {
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type MetricsConfig struct {
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ServiceConnectionStatus struct {
|
|
||||||
Logs *SignalConnectionStatus `json:"logs"`
|
|
||||||
Metrics *SignalConnectionStatus `json:"metrics"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SignalConnectionStatus struct {
|
|
||||||
LastReceivedTsMillis int64 `json:"last_received_ts_ms"` // epoch milliseconds
|
|
||||||
LastReceivedFrom string `json:"last_received_from"` // resource identifier
|
|
||||||
}
|
|
||||||
|
|
||||||
type CompiledCollectionStrategy = services.CollectionStrategy
|
|
||||||
|
|
||||||
func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy, error) {
|
|
||||||
if provider == "aws" {
|
|
||||||
return &CompiledCollectionStrategy{
|
|
||||||
Provider: "aws",
|
|
||||||
AWSMetrics: &services.AWSMetricsStrategy{},
|
|
||||||
AWSLogs: &services.AWSLogsStrategy{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return nil, errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", provider)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper for accumulating strategies for enabled services.
|
|
||||||
func AddServiceStrategy(serviceType string, cs *CompiledCollectionStrategy,
|
|
||||||
definitionStrat *services.CollectionStrategy, config *types.CloudServiceConfig) error {
|
|
||||||
if definitionStrat.Provider != cs.Provider {
|
|
||||||
return errors.NewInternalf(CodeMismatchCloudProvider, "can't add %s service strategy to compiled strategy for %s",
|
|
||||||
definitionStrat.Provider, cs.Provider)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cs.Provider == "aws" {
|
|
||||||
if config.Logs != nil && config.Logs.Enabled {
|
|
||||||
if serviceType == services.S3Sync {
|
|
||||||
// S3 bucket sync; No cloudwatch logs are appended for this service type;
|
|
||||||
// Though definition is populated with a custom cloudwatch group that helps in calculating logs connection status
|
|
||||||
cs.S3Buckets = config.Logs.S3Buckets
|
|
||||||
} else if definitionStrat.AWSLogs != nil { // services that includes a logs subscription
|
|
||||||
cs.AWSLogs.Subscriptions = append(
|
|
||||||
cs.AWSLogs.Subscriptions,
|
|
||||||
definitionStrat.AWSLogs.Subscriptions...,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if config.Metrics != nil && config.Metrics.Enabled && definitionStrat.AWSMetrics != nil {
|
|
||||||
cs.AWSMetrics.StreamFilters = append(
|
|
||||||
cs.AWSMetrics.StreamFilters,
|
|
||||||
definitionStrat.AWSMetrics.StreamFilters...,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cs.Provider)
|
|
||||||
}
|
|
||||||
|
|||||||
37
pkg/query-service/app/cloudintegrations/providerregistry.go
Normal file
37
pkg/query-service/app/cloudintegrations/providerregistry.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package cloudintegrations
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/querier"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implawsprovider"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implazureprovider"
|
||||||
|
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
|
||||||
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewCloudProviderRegistry(
|
||||||
|
logger *slog.Logger,
|
||||||
|
store sqlstore.SQLStore,
|
||||||
|
querier querier.Querier,
|
||||||
|
) (map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider, error) {
|
||||||
|
registry := make(map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider)
|
||||||
|
|
||||||
|
accountsRepo := integrationstore.NewCloudProviderAccountsRepository(store)
|
||||||
|
serviceConfigRepo := integrationstore.NewServiceConfigRepository(store)
|
||||||
|
|
||||||
|
awsProviderImpl, err := implawsprovider.NewAWSCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
registry[integrationtypes.CloudProviderAWS] = awsProviderImpl
|
||||||
|
|
||||||
|
azureProviderImpl, err := implazureprovider.NewAzureCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
registry[integrationtypes.CloudProviderAzure] = azureProviderImpl
|
||||||
|
|
||||||
|
return registry, nil
|
||||||
|
}
|
||||||
@@ -7,6 +7,24 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ApplicationELB_ConsumedLCUs_count",
|
||||||
|
"attributes": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_ApplicationELB_ProcessedBytes_sum",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -7,6 +7,75 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": true
|
"logs": true
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "rest_api",
|
||||||
|
"display_name": "REST API Metrics",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ApiGateway_Count_count",
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "ApiName",
|
||||||
|
"operator": "EXISTS",
|
||||||
|
"value": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category": "http_api",
|
||||||
|
"display_name": "HTTP API Metrics",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ApiGateway_Count_count",
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "ApiId",
|
||||||
|
"operator": "EXISTS",
|
||||||
|
"value": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category": "websocket_api",
|
||||||
|
"display_name": "Websocket API Metrics",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ApiGateway_Count_count",
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "ApiId",
|
||||||
|
"operator": "EXISTS",
|
||||||
|
"value": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "aws.cloudwatch.log_group_name",
|
||||||
|
"operator": "ILIKE",
|
||||||
|
"value": "API-Gateway%"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -148,6 +217,146 @@
|
|||||||
"name": "aws_ApiGateway_Latency_sum",
|
"name": "aws_ApiGateway_Latency_sum",
|
||||||
"unit": "Milliseconds",
|
"unit": "Milliseconds",
|
||||||
"type": "Gauge"
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_4xx_sum",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_4xx_max",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_4xx_min",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_4xx_count",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_5xx_sum",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_5xx_max",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_5xx_min",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_5xx_count",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_DataProcessed_sum",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_DataProcessed_max",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_DataProcessed_min",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_DataProcessed_count",
|
||||||
|
"unit": "Bytes",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ExecutionError_sum",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ExecutionError_max",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ExecutionError_min",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ExecutionError_count",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ClientError_sum",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ClientError_max",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ClientError_min",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ClientError_count",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_IntegrationError_sum",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_IntegrationError_max",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_IntegrationError_min",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_IntegrationError_count",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ConnectCount_sum",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ConnectCount_max",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ConnectCount_min",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "aws_ApiGateway_ConnectCount_count",
|
||||||
|
"unit": "Count",
|
||||||
|
"type": "Gauge"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"logs": [
|
"logs": [
|
||||||
|
|||||||
@@ -7,6 +7,24 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_DynamoDB_AccountMaxReads_max",
|
||||||
|
"attributes": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_DynamoDB_AccountProvisionedReadCapacityUtilization_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -391,4 +409,4 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,24 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_EC2_CPUUtilization_max",
|
||||||
|
"attributes": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_EC2_NetworkIn_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -515,4 +533,4 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,81 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": true
|
"logs": true
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "overview",
|
||||||
|
"display_name": "Overview",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ECS_CPUUtilization_max",
|
||||||
|
"attributes": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_ECS_MemoryUtilization_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category": "containerinsights",
|
||||||
|
"display_name": "Container Insights",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ECS_ContainerInsights_NetworkRxBytes_max",
|
||||||
|
"attributes": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_ECS_ContainerInsights_StorageReadBytes_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category": "enhanced_containerinsights",
|
||||||
|
"display_name": "Enhanced Container Insights",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ECS_ContainerInsights_ContainerCpuUtilization_max",
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "TaskId",
|
||||||
|
"operator": "EXISTS",
|
||||||
|
"value": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_ECS_ContainerInsights_TaskMemoryUtilization_max",
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "TaskId",
|
||||||
|
"operator": "EXISTS",
|
||||||
|
"value": ""
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "aws.cloudwatch.log_group_name",
|
||||||
|
"operator": "ILIKE",
|
||||||
|
"value": "%/ecs/%"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -7,6 +7,20 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_ElastiCache_CacheHitRate_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics":[
|
"metrics":[
|
||||||
{
|
{
|
||||||
@@ -1928,7 +1942,7 @@
|
|||||||
"unit": "Percent",
|
"unit": "Percent",
|
||||||
"type": "Gauge",
|
"type": "Gauge",
|
||||||
"description": ""
|
"description": ""
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"telemetry_collection_strategy": {
|
"telemetry_collection_strategy": {
|
||||||
@@ -1951,4 +1965,4 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,37 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": true
|
"logs": true
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_Lambda_Invocations_sum",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "aws.cloudwatch.log_group_name",
|
||||||
|
"operator": "ILIKE",
|
||||||
|
"value": "/aws/lambda%"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -7,6 +7,20 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_Kafka_KafkaDataLogsDiskUsed_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -1088,4 +1102,3 @@
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -7,6 +7,37 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": true
|
"logs": true
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_RDS_CPUUtilization_max",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "resources.aws.cloudwatch.log_group_name",
|
||||||
|
"operator": "ILIKE",
|
||||||
|
"value": "/aws/rds%"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -800,4 +831,4 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,20 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_SNS_NumberOfMessagesPublished_sum",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -127,4 +141,4 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,24 @@
|
|||||||
"metrics": true,
|
"metrics": true,
|
||||||
"logs": false
|
"logs": false
|
||||||
},
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "aws_SQS_SentMessageSize_max",
|
||||||
|
"attributes": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "aws_SQS_NumberOfMessagesSent_sum",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
"data_collected": {
|
"data_collected": {
|
||||||
"metrics": [
|
"metrics": [
|
||||||
{
|
{
|
||||||
@@ -247,4 +265,4 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
|
||||||
|
}
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
<svg id="f2f04349-8aee-4413-84c9-a9053611b319" xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="ad4c4f96-09aa-4f91-ba10-5cb8ad530f74" x1="9" y1="15.83" x2="9" y2="5.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b3b3b3" /><stop offset="0.26" stop-color="#c1c1c1" /><stop offset="1" stop-color="#e6e6e6" /></linearGradient></defs><title>Icon-storage-86</title><path d="M.5,5.79h17a0,0,0,0,1,0,0v9.48a.57.57,0,0,1-.57.57H1.07a.57.57,0,0,1-.57-.57V5.79A0,0,0,0,1,.5,5.79Z" fill="url(#ad4c4f96-09aa-4f91-ba10-5cb8ad530f74)" /><path d="M1.07,2.17H16.93a.57.57,0,0,1,.57.57V5.79a0,0,0,0,1,0,0H.5a0,0,0,0,1,0,0V2.73A.57.57,0,0,1,1.07,2.17Z" fill="#37c2b1" /><path d="M2.81,6.89H15.18a.27.27,0,0,1,.26.27v1.4a.27.27,0,0,1-.26.27H2.81a.27.27,0,0,1-.26-.27V7.16A.27.27,0,0,1,2.81,6.89Z" fill="#fff" /><path d="M2.82,9.68H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V10A.27.27,0,0,1,2.82,9.68Z" fill="#37c2b1" /><path d="M2.82,12.5H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V12.77A.27.27,0,0,1,2.82,12.5Z" fill="#258277" /></svg>
|
||||||
|
After Width: | Height: | Size: 1.1 KiB |
@@ -0,0 +1,293 @@
|
|||||||
|
{
|
||||||
|
"id": "blobstorage",
|
||||||
|
"title": "Blob Storage",
|
||||||
|
"icon": "file://icon.svg",
|
||||||
|
"overview": "file://overview.md",
|
||||||
|
"supported_signals": {
|
||||||
|
"metrics": true,
|
||||||
|
"logs": true
|
||||||
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "placeholder",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category": "transactions",
|
||||||
|
"display_name": "Transactions",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "placeholder",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "placeholder",
|
||||||
|
"operator": "ILIKE",
|
||||||
|
"value": "%/ecs/%"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"data_collected": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"telemetry_collection_strategy": {
|
||||||
|
"azure_metrics": [
|
||||||
|
{
|
||||||
|
"category_type": "metrics",
|
||||||
|
"name": "Capacity"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category_type": "metrics",
|
||||||
|
"name": "Transaction"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"azure_logs": [
|
||||||
|
{
|
||||||
|
"category_type": "logs",
|
||||||
|
"name": "StorageRead"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category_type": "logs",
|
||||||
|
"name": "StorageWrite"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category_type": "logs",
|
||||||
|
"name": "StorageDelete"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"assets": {
|
||||||
|
"dashboards": [
|
||||||
|
{
|
||||||
|
"id": "overview",
|
||||||
|
"title": "Blob Storage Overview",
|
||||||
|
"description": "Overview of Blob Storage",
|
||||||
|
"definition": "file://assets/dashboards/overview.json"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
Monitor Azure Blob Storage with SigNoz
|
||||||
|
Collect key Blob Storage metrics and view them with an out of the box dashboard.
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
|
||||||
|
}
|
||||||
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
<svg id="f2f04349-8aee-4413-84c9-a9053611b319" xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="ad4c4f96-09aa-4f91-ba10-5cb8ad530f74" x1="9" y1="15.83" x2="9" y2="5.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b3b3b3" /><stop offset="0.26" stop-color="#c1c1c1" /><stop offset="1" stop-color="#e6e6e6" /></linearGradient></defs><title>Icon-storage-86</title><path d="M.5,5.79h17a0,0,0,0,1,0,0v9.48a.57.57,0,0,1-.57.57H1.07a.57.57,0,0,1-.57-.57V5.79A0,0,0,0,1,.5,5.79Z" fill="url(#ad4c4f96-09aa-4f91-ba10-5cb8ad530f74)" /><path d="M1.07,2.17H16.93a.57.57,0,0,1,.57.57V5.79a0,0,0,0,1,0,0H.5a0,0,0,0,1,0,0V2.73A.57.57,0,0,1,1.07,2.17Z" fill="#37c2b1" /><path d="M2.81,6.89H15.18a.27.27,0,0,1,.26.27v1.4a.27.27,0,0,1-.26.27H2.81a.27.27,0,0,1-.26-.27V7.16A.27.27,0,0,1,2.81,6.89Z" fill="#fff" /><path d="M2.82,9.68H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V10A.27.27,0,0,1,2.82,9.68Z" fill="#37c2b1" /><path d="M2.82,12.5H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V12.77A.27.27,0,0,1,2.82,12.5Z" fill="#258277" /></svg>
|
||||||
|
After Width: | Height: | Size: 1.1 KiB |
@@ -0,0 +1,289 @@
|
|||||||
|
{
|
||||||
|
"id": "frontdoor",
|
||||||
|
"title": "Front Door",
|
||||||
|
"icon": "file://icon.svg",
|
||||||
|
"overview": "file://overview.md",
|
||||||
|
"supported_signals": {
|
||||||
|
"metrics": true,
|
||||||
|
"logs": true
|
||||||
|
},
|
||||||
|
"ingestion_status_check": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"category": "overview",
|
||||||
|
"display_name": "Overview",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "placeholder",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category": "insights",
|
||||||
|
"display_name": "Blob Storage Insights",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"key": "placeholder",
|
||||||
|
"attributes": []
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"category": "$default",
|
||||||
|
"display_name": "Default",
|
||||||
|
"checks": [
|
||||||
|
{
|
||||||
|
"attributes": [
|
||||||
|
{
|
||||||
|
"name": "placeholder",
|
||||||
|
"operator": "ILIKE",
|
||||||
|
"value": "%/ecs/%"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"data_collected": {
|
||||||
|
"metrics": [
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_metric_1",
|
||||||
|
"unit": "Percent",
|
||||||
|
"type": "Gauge",
|
||||||
|
"description": ""
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"logs": [
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "placeholder_log_1",
|
||||||
|
"path": "placeholder.path.value",
|
||||||
|
"type": "string"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"telemetry_collection_strategy": {
|
||||||
|
"azure_metrics": [
|
||||||
|
{
|
||||||
|
"category_type": "metrics",
|
||||||
|
"name": "Capacity"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category_type": "metrics",
|
||||||
|
"name": "Transaction"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"azure_logs": [
|
||||||
|
{
|
||||||
|
"category_type": "logs",
|
||||||
|
"name": "StorageRead"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category_type": "logs",
|
||||||
|
"name": "StorageWrite"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"category_type": "logs",
|
||||||
|
"name": "StorageDelete"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"assets": {
|
||||||
|
"dashboards": [
|
||||||
|
{
|
||||||
|
"id": "overview",
|
||||||
|
"title": "Front Door Overview",
|
||||||
|
"description": "Overview of Blob Storage",
|
||||||
|
"definition": "file://assets/dashboards/overview.json"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
Monitor Azure Front Door with SigNoz
|
||||||
|
Collect key Front Door metrics and view them with an out of the box dashboard.
|
||||||
@@ -1,91 +0,0 @@
|
|||||||
package services
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Metadata struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
Title string `json:"title"`
|
|
||||||
Icon string `json:"icon"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Definition struct {
|
|
||||||
Metadata
|
|
||||||
|
|
||||||
Overview string `json:"overview"` // markdown
|
|
||||||
|
|
||||||
Assets Assets `json:"assets"`
|
|
||||||
|
|
||||||
SupportedSignals SupportedSignals `json:"supported_signals"`
|
|
||||||
|
|
||||||
DataCollected DataCollected `json:"data_collected"`
|
|
||||||
|
|
||||||
Strategy *CollectionStrategy `json:"telemetry_collection_strategy"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Assets struct {
|
|
||||||
Dashboards []Dashboard `json:"dashboards"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SupportedSignals struct {
|
|
||||||
Logs bool `json:"logs"`
|
|
||||||
Metrics bool `json:"metrics"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DataCollected struct {
|
|
||||||
Logs []CollectedLogAttribute `json:"logs"`
|
|
||||||
Metrics []CollectedMetric `json:"metrics"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CollectedLogAttribute struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CollectedMetric struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Unit string `json:"unit"`
|
|
||||||
Description string `json:"description"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CollectionStrategy struct {
|
|
||||||
Provider string `json:"provider"`
|
|
||||||
|
|
||||||
AWSMetrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
|
|
||||||
AWSLogs *AWSLogsStrategy `json:"aws_logs,omitempty"`
|
|
||||||
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type
|
|
||||||
}
|
|
||||||
|
|
||||||
type AWSMetricsStrategy struct {
|
|
||||||
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
|
|
||||||
StreamFilters []struct {
|
|
||||||
// json tags here are in the shape expected by AWS API as detailed at
|
|
||||||
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
|
|
||||||
Namespace string `json:"Namespace"`
|
|
||||||
MetricNames []string `json:"MetricNames,omitempty"`
|
|
||||||
} `json:"cloudwatch_metric_stream_filters"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type AWSLogsStrategy struct {
|
|
||||||
Subscriptions []struct {
|
|
||||||
// subscribe to all logs groups with specified prefix.
|
|
||||||
// eg: `/aws/rds/`
|
|
||||||
LogGroupNamePrefix string `json:"log_group_name_prefix"`
|
|
||||||
|
|
||||||
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
|
|
||||||
// "" implies no filtering is required.
|
|
||||||
FilterPattern string `json:"filter_pattern"`
|
|
||||||
} `json:"cloudwatch_logs_subscriptions"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Dashboard struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
Url string `json:"url"`
|
|
||||||
Title string `json:"title"`
|
|
||||||
Description string `json:"description"`
|
|
||||||
Image string `json:"image"`
|
|
||||||
Definition *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
|
|
||||||
}
|
|
||||||
@@ -2,128 +2,111 @@ package services
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"embed"
|
"embed"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"path"
|
"path"
|
||||||
"sort"
|
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
koanfJson "github.com/knadh/koanf/parsers/json"
|
koanfJson "github.com/knadh/koanf/parsers/json"
|
||||||
"golang.org/x/exp/maps"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
S3Sync = "s3sync"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
CodeUnsupportedCloudProvider = errors.MustNewCode("unsupported_cloud_provider")
|
CodeServiceDefinitionNotFound = errors.MustNewCode("service_definition_not_dound")
|
||||||
CodeUnsupportedServiceType = errors.MustNewCode("unsupported_service_type")
|
CodeUnsupportedCloudProvider = errors.MustNewCode("unsupported_cloud_provider")
|
||||||
|
CodeUnsupportedServiceType = errors.MustNewCode("unsupported_service_type")
|
||||||
)
|
)
|
||||||
|
|
||||||
func List(cloudProvider string) ([]Definition, *model.ApiError) {
|
type ServicesProvider[T integrationtypes.Definition] struct {
|
||||||
cloudServices, found := supportedServices[cloudProvider]
|
definitions map[string]T
|
||||||
if !found || cloudServices == nil {
|
|
||||||
return nil, model.NotFoundError(fmt.Errorf(
|
|
||||||
"unsupported cloud provider: %s", cloudProvider,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
services := maps.Values(cloudServices)
|
|
||||||
sort.Slice(services, func(i, j int) bool {
|
|
||||||
return services[i].Id < services[j].Id
|
|
||||||
})
|
|
||||||
|
|
||||||
return services, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Map(cloudProvider string) (map[string]Definition, error) {
|
func (a *ServicesProvider[T]) ListServiceDefinitions(ctx context.Context) (map[string]T, error) {
|
||||||
cloudServices, found := supportedServices[cloudProvider]
|
return a.definitions, nil
|
||||||
if !found || cloudServices == nil {
|
}
|
||||||
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
|
|
||||||
|
func (a *ServicesProvider[T]) GetServiceDefinition(ctx context.Context, serviceName string) (T, error) {
|
||||||
|
def, ok := a.definitions[serviceName]
|
||||||
|
if !ok {
|
||||||
|
return *new(T), errors.NewNotFoundf(CodeServiceDefinitionNotFound, "azure service definition not found: %s", serviceName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return def, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAWSCloudProviderServices() (*ServicesProvider[*integrationtypes.AWSDefinition], error) {
|
||||||
|
definitions, err := readAllServiceDefinitions(integrationtypes.CloudProviderAWS)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceDefinitions := make(map[string]*integrationtypes.AWSDefinition)
|
||||||
|
for id, def := range definitions {
|
||||||
|
typedDef, ok := def.(*integrationtypes.AWSDefinition)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.NewInternalf(errors.CodeInternal, "invalid type for AWS service definition %s", id)
|
||||||
|
}
|
||||||
|
serviceDefinitions[id] = typedDef
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ServicesProvider[*integrationtypes.AWSDefinition]{
|
||||||
|
definitions: serviceDefinitions,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAzureCloudProviderServices() (*ServicesProvider[*integrationtypes.AzureDefinition], error) {
|
||||||
|
definitions, err := readAllServiceDefinitions(integrationtypes.CloudProviderAzure)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
serviceDefinitions := make(map[string]*integrationtypes.AzureDefinition)
|
||||||
|
for id, def := range definitions {
|
||||||
|
typedDef, ok := def.(*integrationtypes.AzureDefinition)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.NewInternalf(errors.CodeInternal, "invalid type for Azure service definition %s", id)
|
||||||
|
}
|
||||||
|
serviceDefinitions[id] = typedDef
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ServicesProvider[*integrationtypes.AzureDefinition]{
|
||||||
|
definitions: serviceDefinitions,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// End of API. Logic for reading service definition files follows
|
||||||
|
|
||||||
|
//go:embed definitions/*
|
||||||
|
var definitionFiles embed.FS
|
||||||
|
|
||||||
|
func readAllServiceDefinitions(cloudProvider valuer.String) (map[string]any, error) {
|
||||||
|
rootDirName := "definitions"
|
||||||
|
|
||||||
|
cloudProviderDirPath := path.Join(rootDirName, cloudProvider.String())
|
||||||
|
|
||||||
|
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cloudServices) < 1 {
|
||||||
|
return nil, errors.NewInternalf(errors.CodeInternal, "no service definitions found in %s", cloudProviderDirPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
return cloudServices, nil
|
return cloudServices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetServiceDefinition(cloudProvider, serviceType string) (*Definition, error) {
|
func readServiceDefinitionsFromDir(cloudProvider valuer.String, cloudProviderDirPath string) (map[string]any, error) {
|
||||||
cloudServices := supportedServices[cloudProvider]
|
|
||||||
if cloudServices == nil {
|
|
||||||
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
svc, exists := cloudServices[serviceType]
|
|
||||||
if !exists {
|
|
||||||
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedServiceType, "%s service not found: %s", cloudProvider, serviceType)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &svc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// End of API. Logic for reading service definition files follows
|
|
||||||
|
|
||||||
// Service details read from ./serviceDefinitions
|
|
||||||
// { "providerName": { "service_id": {...}} }
|
|
||||||
var supportedServices map[string]map[string]Definition
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
err := readAllServiceDefinitions()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Errorf(
|
|
||||||
"couldn't read cloud service definitions: %w", err,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:embed definitions/*
|
|
||||||
var definitionFiles embed.FS
|
|
||||||
|
|
||||||
func readAllServiceDefinitions() error {
|
|
||||||
supportedServices = map[string]map[string]Definition{}
|
|
||||||
|
|
||||||
rootDirName := "definitions"
|
|
||||||
|
|
||||||
cloudProviderDirs, err := fs.ReadDir(definitionFiles, rootDirName)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, d := range cloudProviderDirs {
|
|
||||||
if !d.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
cloudProvider := d.Name()
|
|
||||||
|
|
||||||
cloudProviderDirPath := path.Join(rootDirName, cloudProvider)
|
|
||||||
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't read %s service definitions: %w", cloudProvider, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(cloudServices) < 1 {
|
|
||||||
return fmt.Errorf("no %s services could be read", cloudProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
supportedServices[cloudProvider] = cloudServices
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath string) (
|
|
||||||
map[string]Definition, error,
|
|
||||||
) {
|
|
||||||
svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
|
svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't list integrations dirs")
|
||||||
}
|
}
|
||||||
|
|
||||||
svcDefs := map[string]Definition{}
|
svcDefs := make(map[string]any)
|
||||||
|
|
||||||
for _, d := range svcDefDirs {
|
for _, d := range svcDefDirs {
|
||||||
if !d.IsDir() {
|
if !d.IsDir() {
|
||||||
@@ -133,103 +116,73 @@ func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath st
|
|||||||
svcDirPath := path.Join(cloudProviderDirPath, d.Name())
|
svcDirPath := path.Join(cloudProviderDirPath, d.Name())
|
||||||
s, err := readServiceDefinition(cloudProvider, svcDirPath)
|
s, err := readServiceDefinition(cloudProvider, svcDirPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't read svc definition for %s: %w", d.Name(), err)
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, exists := svcDefs[s.Id]
|
_, exists := svcDefs[s.GetId()]
|
||||||
if exists {
|
if exists {
|
||||||
return nil, fmt.Errorf(
|
return nil, errors.NewInternalf(errors.CodeInternal, "duplicate service definition for id %s at %s", s.GetId(), d.Name())
|
||||||
"duplicate service definition for id %s at %s", s.Id, d.Name(),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
svcDefs[s.Id] = *s
|
svcDefs[s.GetId()] = s
|
||||||
}
|
}
|
||||||
|
|
||||||
return svcDefs, nil
|
return svcDefs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func readServiceDefinition(cloudProvider string, svcDirpath string) (*Definition, error) {
|
func readServiceDefinition(cloudProvider valuer.String, svcDirpath string) (integrationtypes.Definition, error) {
|
||||||
integrationJsonPath := path.Join(svcDirpath, "integration.json")
|
integrationJsonPath := path.Join(svcDirpath, "integration.json")
|
||||||
|
|
||||||
serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
|
serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read integration definition in %s", svcDirpath)
|
||||||
"couldn't find integration.json in %s: %w",
|
|
||||||
svcDirpath, err,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
|
integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't parse integration definition in %s", svcDirpath)
|
||||||
"couldn't parse integration.json from %s: %w",
|
|
||||||
integrationJsonPath, err,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
hydrated, err := integrations.HydrateFileUris(
|
hydrated, err := integrations.HydrateFileUris(integrationSpec, definitionFiles, svcDirpath)
|
||||||
integrationSpec, definitionFiles, svcDirpath,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't hydrate integration definition in %s", svcDirpath)
|
||||||
"couldn't hydrate files referenced in service definition %s: %w",
|
|
||||||
integrationJsonPath, err,
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
hydratedSpec := hydrated.(map[string]any)
|
hydratedSpec := hydrated.(map[string]any)
|
||||||
|
|
||||||
serviceDef, err := ParseStructWithJsonTagsFromMap[Definition](hydratedSpec)
|
var serviceDef integrationtypes.Definition
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf(
|
switch cloudProvider {
|
||||||
"couldn't parse hydrated JSON spec read from %s: %w",
|
case integrationtypes.CloudProviderAWS:
|
||||||
integrationJsonPath, err,
|
serviceDef = &integrationtypes.AWSDefinition{}
|
||||||
)
|
case integrationtypes.CloudProviderAzure:
|
||||||
|
serviceDef = &integrationtypes.AzureDefinition{}
|
||||||
|
default:
|
||||||
|
// ideally this shouldn't happen hence throwing internal error
|
||||||
|
return nil, errors.NewInternalf(errors.CodeInternal, "unsupported cloud provider: %s", cloudProvider)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = validateServiceDefinition(serviceDef)
|
err = parseStructWithJsonTagsFromMap(hydratedSpec, serviceDef)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
|
return nil, err
|
||||||
|
}
|
||||||
|
err = serviceDef.Validate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
serviceDef.Strategy.Provider = cloudProvider
|
|
||||||
|
|
||||||
return serviceDef, nil
|
return serviceDef, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateServiceDefinition(s *Definition) error {
|
func parseStructWithJsonTagsFromMap(data map[string]any, target interface{}) error {
|
||||||
// Validate dashboard data
|
|
||||||
seenDashboardIds := map[string]interface{}{}
|
|
||||||
for _, dd := range s.Assets.Dashboards {
|
|
||||||
if _, seen := seenDashboardIds[dd.Id]; seen {
|
|
||||||
return fmt.Errorf("multiple dashboards found with id %s", dd.Id)
|
|
||||||
}
|
|
||||||
seenDashboardIds[dd.Id] = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if s.Strategy == nil {
|
|
||||||
return fmt.Errorf("telemetry_collection_strategy is required")
|
|
||||||
}
|
|
||||||
|
|
||||||
// potentially more to follow
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ParseStructWithJsonTagsFromMap[StructType any](data map[string]any) (
|
|
||||||
*StructType, error,
|
|
||||||
) {
|
|
||||||
mapJson, err := json.Marshal(data)
|
mapJson, err := json.Marshal(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't marshal map to json: %w", err)
|
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal service definition json data")
|
||||||
}
|
}
|
||||||
|
|
||||||
var res StructType
|
|
||||||
decoder := json.NewDecoder(bytes.NewReader(mapJson))
|
decoder := json.NewDecoder(bytes.NewReader(mapJson))
|
||||||
decoder.DisallowUnknownFields()
|
decoder.DisallowUnknownFields()
|
||||||
err = decoder.Decode(&res)
|
err = decoder.Decode(target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't unmarshal json back to struct: %w", err)
|
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal service definition json data")
|
||||||
}
|
}
|
||||||
return &res, nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,35 +1,3 @@
|
|||||||
package services
|
package services
|
||||||
|
|
||||||
import (
|
// TODO: add more tests for services package
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAvailableServices(t *testing.T) {
|
|
||||||
require := require.New(t)
|
|
||||||
|
|
||||||
// should be able to list available services.
|
|
||||||
_, apiErr := List("bad-cloud-provider")
|
|
||||||
require.NotNil(apiErr)
|
|
||||||
require.Equal(model.ErrorNotFound, apiErr.Type())
|
|
||||||
|
|
||||||
awsSvcs, apiErr := List("aws")
|
|
||||||
require.Nil(apiErr)
|
|
||||||
require.Greater(len(awsSvcs), 0)
|
|
||||||
|
|
||||||
// should be able to get details of a service
|
|
||||||
_, err := GetServiceDefinition(
|
|
||||||
"aws", "bad-service-id",
|
|
||||||
)
|
|
||||||
require.NotNil(err)
|
|
||||||
require.True(errors.Ast(err, errors.TypeNotFound))
|
|
||||||
|
|
||||||
svc, err := GetServiceDefinition(
|
|
||||||
"aws", awsSvcs[0].Id,
|
|
||||||
)
|
|
||||||
require.Nil(err)
|
|
||||||
require.Equal(*svc, awsSvcs[0])
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,55 +1,57 @@
|
|||||||
package cloudintegrations
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
)
|
)
|
||||||
|
|
||||||
type cloudProviderAccountsRepository interface {
|
var (
|
||||||
listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)
|
CodeCloudIntegrationAccountNotFound errors.Code = errors.MustNewCode("cloud_integration_account_not_found")
|
||||||
|
)
|
||||||
|
|
||||||
get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)
|
type CloudProviderAccountsRepository interface {
|
||||||
|
ListConnected(ctx context.Context, orgId string, provider string) ([]integrationtypes.CloudIntegration, error)
|
||||||
|
|
||||||
getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)
|
Get(ctx context.Context, orgId string, provider string, id string) (*integrationtypes.CloudIntegration, error)
|
||||||
|
|
||||||
|
GetConnectedCloudAccount(ctx context.Context, orgId, provider string, accountID string) (*integrationtypes.CloudIntegration, error)
|
||||||
|
|
||||||
// Insert an account or update it by (cloudProvider, id)
|
// Insert an account or update it by (cloudProvider, id)
|
||||||
// for specified non-empty fields
|
// for specified non-empty fields
|
||||||
upsert(
|
Upsert(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
provider string,
|
provider string,
|
||||||
id *string,
|
id *string,
|
||||||
config *types.AccountConfig,
|
config []byte,
|
||||||
accountId *string,
|
accountId *string,
|
||||||
agentReport *types.AgentReport,
|
agentReport *integrationtypes.AgentReport,
|
||||||
removedAt *time.Time,
|
removedAt *time.Time,
|
||||||
) (*types.CloudIntegration, *model.ApiError)
|
) (*integrationtypes.CloudIntegration, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
|
func NewCloudProviderAccountsRepository(store sqlstore.SQLStore) CloudProviderAccountsRepository {
|
||||||
*cloudProviderAccountsSQLRepository, error,
|
return &cloudProviderAccountsSQLRepository{store: store}
|
||||||
) {
|
|
||||||
return &cloudProviderAccountsSQLRepository{
|
|
||||||
store: store,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type cloudProviderAccountsSQLRepository struct {
|
type cloudProviderAccountsSQLRepository struct {
|
||||||
store sqlstore.SQLStore
|
store sqlstore.SQLStore
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *cloudProviderAccountsSQLRepository) listConnected(
|
func (r *cloudProviderAccountsSQLRepository) ListConnected(
|
||||||
ctx context.Context, orgId string, cloudProvider string,
|
ctx context.Context, orgId string, cloudProvider string,
|
||||||
) ([]types.CloudIntegration, *model.ApiError) {
|
) ([]integrationtypes.CloudIntegration, error) {
|
||||||
accounts := []types.CloudIntegration{}
|
accounts := []integrationtypes.CloudIntegration{}
|
||||||
|
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model(&accounts).
|
Model(&accounts).
|
||||||
@@ -62,18 +64,17 @@ func (r *cloudProviderAccountsSQLRepository) listConnected(
|
|||||||
Scan(ctx)
|
Scan(ctx)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
slog.ErrorContext(ctx, "error querying connected cloud accounts", "error", err)
|
||||||
"could not query connected cloud accounts: %w", err,
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not query connected cloud accounts")
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return accounts, nil
|
return accounts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *cloudProviderAccountsSQLRepository) get(
|
func (r *cloudProviderAccountsSQLRepository) Get(
|
||||||
ctx context.Context, orgId string, provider string, id string,
|
ctx context.Context, orgId string, provider string, id string,
|
||||||
) (*types.CloudIntegration, *model.ApiError) {
|
) (*integrationtypes.CloudIntegration, error) {
|
||||||
var result types.CloudIntegration
|
var result integrationtypes.CloudIntegration
|
||||||
|
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model(&result).
|
Model(&result).
|
||||||
@@ -82,23 +83,25 @@ func (r *cloudProviderAccountsSQLRepository) get(
|
|||||||
Where("id = ?", id).
|
Where("id = ?", id).
|
||||||
Scan(ctx)
|
Scan(ctx)
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
if err != nil {
|
||||||
return nil, model.NotFoundError(fmt.Errorf(
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
"couldn't find account with Id %s", id,
|
return nil, errors.WrapNotFoundf(
|
||||||
))
|
err,
|
||||||
} else if err != nil {
|
CodeCloudIntegrationAccountNotFound,
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
"couldn't find account with Id %s", id,
|
||||||
"couldn't query cloud provider accounts: %w", err,
|
)
|
||||||
))
|
}
|
||||||
|
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
|
||||||
}
|
}
|
||||||
|
|
||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
|
func (r *cloudProviderAccountsSQLRepository) GetConnectedCloudAccount(
|
||||||
ctx context.Context, orgId string, provider string, accountId string,
|
ctx context.Context, orgId string, provider string, accountId string,
|
||||||
) (*types.CloudIntegration, *model.ApiError) {
|
) (*integrationtypes.CloudIntegration, error) {
|
||||||
var result types.CloudIntegration
|
var result integrationtypes.CloudIntegration
|
||||||
|
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model(&result).
|
Model(&result).
|
||||||
@@ -109,29 +112,25 @@ func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
|
|||||||
Where("removed_at is NULL").
|
Where("removed_at is NULL").
|
||||||
Scan(ctx)
|
Scan(ctx)
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
return nil, model.NotFoundError(fmt.Errorf(
|
return nil, errors.WrapNotFoundf(err, CodeCloudIntegrationAccountNotFound, "couldn't find connected cloud account %s", accountId)
|
||||||
"couldn't find connected cloud account %s", accountId,
|
|
||||||
))
|
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
|
||||||
"couldn't query cloud provider accounts: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *cloudProviderAccountsSQLRepository) upsert(
|
func (r *cloudProviderAccountsSQLRepository) Upsert(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
provider string,
|
provider string,
|
||||||
id *string,
|
id *string,
|
||||||
config *types.AccountConfig,
|
config []byte,
|
||||||
accountId *string,
|
accountId *string,
|
||||||
agentReport *types.AgentReport,
|
agentReport *integrationtypes.AgentReport,
|
||||||
removedAt *time.Time,
|
removedAt *time.Time,
|
||||||
) (*types.CloudIntegration, *model.ApiError) {
|
) (*integrationtypes.CloudIntegration, error) {
|
||||||
// Insert
|
// Insert
|
||||||
if id == nil {
|
if id == nil {
|
||||||
temp := valuer.GenerateUUID().StringValue()
|
temp := valuer.GenerateUUID().StringValue()
|
||||||
@@ -181,7 +180,7 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
integration := types.CloudIntegration{
|
integration := integrationtypes.CloudIntegration{
|
||||||
OrgID: orgId,
|
OrgID: orgId,
|
||||||
Provider: provider,
|
Provider: provider,
|
||||||
Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
|
Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
|
||||||
@@ -189,28 +188,25 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
|
|||||||
CreatedAt: time.Now(),
|
CreatedAt: time.Now(),
|
||||||
UpdatedAt: time.Now(),
|
UpdatedAt: time.Now(),
|
||||||
},
|
},
|
||||||
Config: config,
|
Config: string(config),
|
||||||
AccountID: accountId,
|
AccountID: accountId,
|
||||||
LastAgentReport: agentReport,
|
LastAgentReport: agentReport,
|
||||||
RemovedAt: removedAt,
|
RemovedAt: removedAt,
|
||||||
}
|
}
|
||||||
|
|
||||||
_, dbErr := r.store.BunDB().NewInsert().
|
_, err := r.store.BunDB().NewInsert().
|
||||||
Model(&integration).
|
Model(&integration).
|
||||||
On(onConflictClause).
|
On(onConflictClause).
|
||||||
Exec(ctx)
|
Exec(ctx)
|
||||||
|
|
||||||
if dbErr != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud integration account")
|
||||||
"could not upsert cloud account record: %w", dbErr,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
upsertedAccount, apiErr := r.get(ctx, orgId, provider, *id)
|
upsertedAccount, err := r.Get(ctx, orgId, provider, *id)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
slog.ErrorContext(ctx, "error upserting cloud integration account", "error", err)
|
||||||
"couldn't fetch upserted account by id: %w", apiErr.ToError(),
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't get upserted cloud integration account")
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return upsertedAccount, nil
|
return upsertedAccount, nil
|
||||||
@@ -1,64 +1,63 @@
|
|||||||
package cloudintegrations
|
package store
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"fmt"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
CodeServiceConfigNotFound = errors.MustNewCode("service_config_not_found")
|
||||||
|
)
|
||||||
|
|
||||||
type ServiceConfigDatabase interface {
|
type ServiceConfigDatabase interface {
|
||||||
get(
|
Get(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgID string,
|
orgID string,
|
||||||
cloudAccountId string,
|
cloudAccountId string,
|
||||||
serviceType string,
|
serviceType string,
|
||||||
) (*types.CloudServiceConfig, *model.ApiError)
|
) ([]byte, error)
|
||||||
|
|
||||||
upsert(
|
Upsert(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgID string,
|
orgID string,
|
||||||
cloudProvider string,
|
cloudProvider string,
|
||||||
cloudAccountId string,
|
cloudAccountId string,
|
||||||
serviceId string,
|
serviceId string,
|
||||||
config types.CloudServiceConfig,
|
config []byte,
|
||||||
) (*types.CloudServiceConfig, *model.ApiError)
|
) ([]byte, error)
|
||||||
|
|
||||||
getAllForAccount(
|
GetAllForAccount(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgID string,
|
orgID string,
|
||||||
cloudAccountId string,
|
cloudAccountId string,
|
||||||
) (
|
) (
|
||||||
configsBySvcId map[string]*types.CloudServiceConfig,
|
map[string][]byte,
|
||||||
apiErr *model.ApiError,
|
error,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func newServiceConfigRepository(store sqlstore.SQLStore) (
|
func NewServiceConfigRepository(store sqlstore.SQLStore) ServiceConfigDatabase {
|
||||||
*serviceConfigSQLRepository, error,
|
return &serviceConfigSQLRepository{store: store}
|
||||||
) {
|
|
||||||
return &serviceConfigSQLRepository{
|
|
||||||
store: store,
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type serviceConfigSQLRepository struct {
|
type serviceConfigSQLRepository struct {
|
||||||
store sqlstore.SQLStore
|
store sqlstore.SQLStore
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *serviceConfigSQLRepository) get(
|
func (r *serviceConfigSQLRepository) Get(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgID string,
|
orgID string,
|
||||||
cloudAccountId string,
|
cloudAccountId string,
|
||||||
serviceType string,
|
serviceType string,
|
||||||
) (*types.CloudServiceConfig, *model.ApiError) {
|
) ([]byte, error) {
|
||||||
|
var result integrationtypes.CloudIntegrationService
|
||||||
var result types.CloudIntegrationService
|
|
||||||
|
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model(&result).
|
Model(&result).
|
||||||
@@ -67,36 +66,30 @@ func (r *serviceConfigSQLRepository) get(
|
|||||||
Where("ci.id = ?", cloudAccountId).
|
Where("ci.id = ?", cloudAccountId).
|
||||||
Where("cis.type = ?", serviceType).
|
Where("cis.type = ?", serviceType).
|
||||||
Scan(ctx)
|
Scan(ctx)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
|
return nil, errors.WrapNotFoundf(err, CodeServiceConfigNotFound, "couldn't find config for cloud account %s", cloudAccountId)
|
||||||
|
}
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud service config")
|
||||||
return nil, model.NotFoundError(fmt.Errorf(
|
|
||||||
"couldn't find config for cloud account %s",
|
|
||||||
cloudAccountId,
|
|
||||||
))
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
|
||||||
"couldn't query cloud service config: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &result.Config, nil
|
return []byte(result.Config), nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *serviceConfigSQLRepository) upsert(
|
func (r *serviceConfigSQLRepository) Upsert(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgID string,
|
orgID string,
|
||||||
cloudProvider string,
|
cloudProvider string,
|
||||||
cloudAccountId string,
|
cloudAccountId string,
|
||||||
serviceId string,
|
serviceId string,
|
||||||
config types.CloudServiceConfig,
|
config []byte,
|
||||||
) (*types.CloudServiceConfig, *model.ApiError) {
|
) ([]byte, error) {
|
||||||
|
|
||||||
// get cloud integration id from account id
|
// get cloud integration id from account id
|
||||||
// if the account is not connected, we don't need to upsert the config
|
// if the account is not connected, we don't need to upsert the config
|
||||||
var cloudIntegrationId string
|
var cloudIntegrationId string
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model((*types.CloudIntegration)(nil)).
|
Model((*integrationtypes.CloudIntegration)(nil)).
|
||||||
Column("id").
|
Column("id").
|
||||||
Where("provider = ?", cloudProvider).
|
Where("provider = ?", cloudProvider).
|
||||||
Where("account_id = ?", cloudAccountId).
|
Where("account_id = ?", cloudAccountId).
|
||||||
@@ -104,20 +97,24 @@ func (r *serviceConfigSQLRepository) upsert(
|
|||||||
Where("removed_at is NULL").
|
Where("removed_at is NULL").
|
||||||
Where("last_agent_report is not NULL").
|
Where("last_agent_report is not NULL").
|
||||||
Scan(ctx, &cloudIntegrationId)
|
Scan(ctx, &cloudIntegrationId)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
if errors.Is(err, sql.ErrNoRows) {
|
||||||
"couldn't query cloud integration id: %w", err,
|
return nil, errors.WrapNotFoundf(
|
||||||
))
|
err,
|
||||||
|
CodeCloudIntegrationAccountNotFound,
|
||||||
|
"couldn't find active cloud integration account",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud integration id")
|
||||||
}
|
}
|
||||||
|
|
||||||
serviceConfig := types.CloudIntegrationService{
|
serviceConfig := integrationtypes.CloudIntegrationService{
|
||||||
Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
|
Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
|
||||||
TimeAuditable: types.TimeAuditable{
|
TimeAuditable: types.TimeAuditable{
|
||||||
CreatedAt: time.Now(),
|
CreatedAt: time.Now(),
|
||||||
UpdatedAt: time.Now(),
|
UpdatedAt: time.Now(),
|
||||||
},
|
},
|
||||||
Config: config,
|
Config: string(config),
|
||||||
Type: serviceId,
|
Type: serviceId,
|
||||||
CloudIntegrationID: cloudIntegrationId,
|
CloudIntegrationID: cloudIntegrationId,
|
||||||
}
|
}
|
||||||
@@ -126,21 +123,18 @@ func (r *serviceConfigSQLRepository) upsert(
|
|||||||
On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
|
On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
|
||||||
Exec(ctx)
|
Exec(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud service config")
|
||||||
"could not upsert cloud service config: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &serviceConfig.Config, nil
|
return config, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *serviceConfigSQLRepository) getAllForAccount(
|
func (r *serviceConfigSQLRepository) GetAllForAccount(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgID string,
|
orgID string,
|
||||||
cloudAccountId string,
|
cloudAccountId string,
|
||||||
) (map[string]*types.CloudServiceConfig, *model.ApiError) {
|
) (map[string][]byte, error) {
|
||||||
serviceConfigs := []types.CloudIntegrationService{}
|
var serviceConfigs []integrationtypes.CloudIntegrationService
|
||||||
|
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model(&serviceConfigs).
|
Model(&serviceConfigs).
|
||||||
@@ -149,15 +143,13 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
|
|||||||
Where("ci.org_id = ?", orgID).
|
Where("ci.org_id = ?", orgID).
|
||||||
Scan(ctx)
|
Scan(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query service configs from db")
|
||||||
"could not query service configs from db: %w", err,
|
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
result := map[string]*types.CloudServiceConfig{}
|
result := make(map[string][]byte)
|
||||||
|
|
||||||
for _, r := range serviceConfigs {
|
for _, r := range serviceConfigs {
|
||||||
result[r.Type] = &r.Config
|
result[r.Type] = []byte(r.Config)
|
||||||
}
|
}
|
||||||
|
|
||||||
return result, nil
|
return result, nil
|
||||||
@@ -6,11 +6,7 @@ import (
|
|||||||
"database/sql"
|
"database/sql"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
|
||||||
"github.com/SigNoz/signoz/pkg/flagger"
|
|
||||||
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
|
|
||||||
"github.com/SigNoz/signoz/pkg/queryparser"
|
|
||||||
|
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
@@ -25,14 +21,19 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/alertmanager"
|
"github.com/SigNoz/signoz/pkg/alertmanager"
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
|
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/flagger"
|
||||||
"github.com/SigNoz/signoz/pkg/http/middleware"
|
"github.com/SigNoz/signoz/pkg/http/middleware"
|
||||||
"github.com/SigNoz/signoz/pkg/http/render"
|
"github.com/SigNoz/signoz/pkg/http/render"
|
||||||
"github.com/SigNoz/signoz/pkg/licensing"
|
"github.com/SigNoz/signoz/pkg/licensing"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
|
||||||
|
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
|
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
|
||||||
|
"github.com/SigNoz/signoz/pkg/queryparser"
|
||||||
"github.com/SigNoz/signoz/pkg/signoz"
|
"github.com/SigNoz/signoz/pkg/signoz"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
"github.com/prometheus/prometheus/promql"
|
"github.com/prometheus/prometheus/promql"
|
||||||
|
|
||||||
@@ -44,7 +45,6 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/contextlinks"
|
"github.com/SigNoz/signoz/pkg/contextlinks"
|
||||||
traceFunnelsModule "github.com/SigNoz/signoz/pkg/modules/tracefunnel"
|
traceFunnelsModule "github.com/SigNoz/signoz/pkg/modules/tracefunnel"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
|
"github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
|
||||||
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
|
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/logs"
|
"github.com/SigNoz/signoz/pkg/query-service/app/logs"
|
||||||
@@ -111,7 +111,7 @@ type APIHandler struct {
|
|||||||
|
|
||||||
IntegrationsController *integrations.Controller
|
IntegrationsController *integrations.Controller
|
||||||
|
|
||||||
CloudIntegrationsController *cloudintegrations.Controller
|
cloudIntegrationsRegistry map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider
|
||||||
|
|
||||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||||
|
|
||||||
@@ -158,9 +158,6 @@ type APIHandlerOpts struct {
|
|||||||
// Integrations
|
// Integrations
|
||||||
IntegrationsController *integrations.Controller
|
IntegrationsController *integrations.Controller
|
||||||
|
|
||||||
// Cloud Provider Integrations
|
|
||||||
CloudIntegrationsController *cloudintegrations.Controller
|
|
||||||
|
|
||||||
// Log parsing pipelines
|
// Log parsing pipelines
|
||||||
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
|
||||||
|
|
||||||
@@ -174,6 +171,8 @@ type APIHandlerOpts struct {
|
|||||||
QueryParserAPI *queryparser.API
|
QueryParserAPI *queryparser.API
|
||||||
|
|
||||||
Signoz *signoz.SigNoz
|
Signoz *signoz.SigNoz
|
||||||
|
|
||||||
|
Logger *slog.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAPIHandler returns an APIHandler
|
// NewAPIHandler returns an APIHandler
|
||||||
@@ -209,12 +208,21 @@ func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, erro
|
|||||||
summaryService := metricsexplorer.NewSummaryService(opts.Reader, opts.RuleManager, opts.Signoz.Modules.Dashboard)
|
summaryService := metricsexplorer.NewSummaryService(opts.Reader, opts.RuleManager, opts.Signoz.Modules.Dashboard)
|
||||||
//quickFilterModule := quickfilter.NewAPI(opts.QuickFilterModule)
|
//quickFilterModule := quickfilter.NewAPI(opts.QuickFilterModule)
|
||||||
|
|
||||||
|
cloudIntegrationsRegistry, err := cloudintegrations.NewCloudProviderRegistry(
|
||||||
|
opts.Logger,
|
||||||
|
opts.Signoz.SQLStore,
|
||||||
|
opts.Signoz.Querier,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
aH := &APIHandler{
|
aH := &APIHandler{
|
||||||
reader: opts.Reader,
|
reader: opts.Reader,
|
||||||
temporalityMap: make(map[string]map[v3.Temporality]bool),
|
temporalityMap: make(map[string]map[v3.Temporality]bool),
|
||||||
ruleManager: opts.RuleManager,
|
ruleManager: opts.RuleManager,
|
||||||
IntegrationsController: opts.IntegrationsController,
|
IntegrationsController: opts.IntegrationsController,
|
||||||
CloudIntegrationsController: opts.CloudIntegrationsController,
|
cloudIntegrationsRegistry: cloudIntegrationsRegistry,
|
||||||
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
LogsParsingPipelineController: opts.LogsParsingPipelineController,
|
||||||
querier: querier,
|
querier: querier,
|
||||||
querierV2: querierv2,
|
querierV2: querierv2,
|
||||||
@@ -1209,13 +1217,19 @@ func (aH *APIHandler) Get(rw http.ResponseWriter, r *http.Request) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
dashboard := new(dashboardtypes.Dashboard)
|
dashboard := new(dashboardtypes.Dashboard)
|
||||||
if aH.CloudIntegrationsController.IsCloudIntegrationDashboardUuid(id) {
|
if integrationtypes.IsCloudIntegrationDashboardUuid(id) {
|
||||||
cloudIntegrationDashboard, apiErr := aH.CloudIntegrationsController.GetDashboardById(ctx, orgID, id)
|
cloudProvider, err := integrationtypes.GetCloudProviderFromDashboardID(id)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
render.Error(rw, errorsV2.Wrapf(apiErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to get dashboard"))
|
render.Error(rw, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
dashboard = cloudIntegrationDashboard
|
|
||||||
|
integrationDashboard, err := aH.cloudIntegrationsRegistry[cloudProvider].GetDashboard(ctx, id, orgID)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(rw, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dashboard = integrationDashboard
|
||||||
} else if aH.IntegrationsController.IsInstalledIntegrationDashboardID(id) {
|
} else if aH.IntegrationsController.IsInstalledIntegrationDashboardID(id) {
|
||||||
integrationDashboard, apiErr := aH.IntegrationsController.GetInstalledIntegrationDashboardById(ctx, orgID, id)
|
integrationDashboard, apiErr := aH.IntegrationsController.GetInstalledIntegrationDashboardById(ctx, orgID, id)
|
||||||
if apiErr != nil {
|
if apiErr != nil {
|
||||||
@@ -1279,11 +1293,13 @@ func (aH *APIHandler) List(rw http.ResponseWriter, r *http.Request) {
|
|||||||
dashboards = append(dashboards, installedIntegrationDashboards...)
|
dashboards = append(dashboards, installedIntegrationDashboards...)
|
||||||
}
|
}
|
||||||
|
|
||||||
cloudIntegrationDashboards, apiErr := aH.CloudIntegrationsController.AvailableDashboards(ctx, orgID)
|
for _, provider := range aH.cloudIntegrationsRegistry {
|
||||||
if apiErr != nil {
|
cloudIntegrationDashboards, err := provider.GetAvailableDashboards(ctx, orgID)
|
||||||
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
|
if err != nil {
|
||||||
} else {
|
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
|
||||||
dashboards = append(dashboards, cloudIntegrationDashboards...)
|
} else {
|
||||||
|
dashboards = append(dashboards, cloudIntegrationDashboards...)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
gettableDashboards, err := dashboardtypes.NewGettableDashboardsFromDashboards(dashboards)
|
gettableDashboards, err := dashboardtypes.NewGettableDashboardsFromDashboards(dashboards)
|
||||||
@@ -3259,15 +3275,15 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(w http.ResponseWriter, r *h
|
|||||||
lookbackSeconds = 15 * 60
|
lookbackSeconds = 15 * 60
|
||||||
}
|
}
|
||||||
|
|
||||||
connectionStatus, apiErr := aH.calculateConnectionStatus(
|
connectionStatus, err := aH.calculateConnectionStatus(
|
||||||
r.Context(), orgID, connectionTests, lookbackSeconds,
|
r.Context(), orgID, connectionTests, lookbackSeconds,
|
||||||
)
|
)
|
||||||
if apiErr != nil {
|
if err != nil {
|
||||||
RespondError(w, apiErr, "Failed to calculate integration connection status")
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
aH.Respond(w, connectionStatus)
|
render.Success(w, http.StatusOK, connectionStatus)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (aH *APIHandler) calculateConnectionStatus(
|
func (aH *APIHandler) calculateConnectionStatus(
|
||||||
@@ -3275,10 +3291,11 @@ func (aH *APIHandler) calculateConnectionStatus(
|
|||||||
orgID valuer.UUID,
|
orgID valuer.UUID,
|
||||||
connectionTests *integrations.IntegrationConnectionTests,
|
connectionTests *integrations.IntegrationConnectionTests,
|
||||||
lookbackSeconds int64,
|
lookbackSeconds int64,
|
||||||
) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
|
) (*integrations.IntegrationConnectionStatus, error) {
|
||||||
// Calculate connection status for signals in parallel
|
// Calculate connection status for signals in parallel
|
||||||
|
|
||||||
result := &integrations.IntegrationConnectionStatus{}
|
result := &integrations.IntegrationConnectionStatus{}
|
||||||
|
// TODO: migrate to errors package
|
||||||
errors := []*model.ApiError{}
|
errors := []*model.ApiError{}
|
||||||
var resultLock sync.Mutex
|
var resultLock sync.Mutex
|
||||||
|
|
||||||
@@ -3476,12 +3493,14 @@ func (aH *APIHandler) UninstallIntegration(w http.ResponseWriter, r *http.Reques
|
|||||||
aH.Respond(w, map[string]interface{}{})
|
aH.Respond(w, map[string]interface{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// cloud provider integrations
|
// RegisterCloudIntegrationsRoutes register routes for cloud provider integrations
|
||||||
func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
|
func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
|
||||||
subRouter := router.PathPrefix("/api/v1/cloud-integrations").Subrouter()
|
subRouter := router.PathPrefix("/api/v1/cloud-integrations").Subrouter()
|
||||||
|
|
||||||
|
subRouter.Use(middleware.NewRecovery(aH.Signoz.Instrumentation.Logger()).Wrap)
|
||||||
|
|
||||||
subRouter.HandleFunc(
|
subRouter.HandleFunc(
|
||||||
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionUrl),
|
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionArtifact),
|
||||||
).Methods(http.MethodPost)
|
).Methods(http.MethodPost)
|
||||||
|
|
||||||
subRouter.HandleFunc(
|
subRouter.HandleFunc(
|
||||||
@@ -3515,198 +3534,137 @@ func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *mi
|
|||||||
subRouter.HandleFunc(
|
subRouter.HandleFunc(
|
||||||
"/{cloudProvider}/services/{serviceId}/config", am.EditAccess(aH.CloudIntegrationsUpdateServiceConfig),
|
"/{cloudProvider}/services/{serviceId}/config", am.EditAccess(aH.CloudIntegrationsUpdateServiceConfig),
|
||||||
).Methods(http.MethodPost)
|
).Methods(http.MethodPost)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
|
func (aH *APIHandler) CloudIntegrationsGenerateConnectionArtifact(w http.ResponseWriter, r *http.Request) {
|
||||||
w http.ResponseWriter, r *http.Request,
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
|
||||||
if errv2 != nil {
|
|
||||||
render.Error(w, errv2)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider,
|
|
||||||
)
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
aH.Respond(w, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
|
|
||||||
w http.ResponseWriter, r *http.Request,
|
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
|
|
||||||
req := cloudintegrations.GenerateConnectionUrlRequest{}
|
|
||||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
|
||||||
if errv2 != nil {
|
|
||||||
render.Error(w, errv2)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider, req,
|
|
||||||
)
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
aH.Respond(w, result)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
|
|
||||||
w http.ResponseWriter, r *http.Request,
|
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
accountId := mux.Vars(r)["accountId"]
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
|
||||||
if errv2 != nil {
|
|
||||||
render.Error(w, errv2)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider, accountId,
|
|
||||||
)
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
aH.Respond(w, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
|
|
||||||
w http.ResponseWriter, r *http.Request,
|
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
|
|
||||||
req := cloudintegrations.AgentCheckInRequest{}
|
|
||||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
|
||||||
if errv2 != nil {
|
|
||||||
render.Error(w, errv2)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := aH.CloudIntegrationsController.CheckInAsAgent(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider, req,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
render.Error(w, err)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
aH.Respond(w, result)
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
reqBody, err := io.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GenerateConnectionArtifact(r.Context(), &integrationtypes.PostableConnectionArtifact{
|
||||||
|
OrgID: claims.OrgID,
|
||||||
|
Data: reqBody,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
aH.Signoz.Instrumentation.Logger().ErrorContext(r.Context(),
|
||||||
|
"failed to generate connection artifact for cloud integration",
|
||||||
|
slog.String("cloudProvider", cloudProviderString),
|
||||||
|
slog.String("orgID", claims.OrgID),
|
||||||
|
)
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
|
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(w http.ResponseWriter, r *http.Request) {
|
||||||
w http.ResponseWriter, r *http.Request,
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListConnectedAccounts(r.Context(), claims.OrgID)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(w http.ResponseWriter, r *http.Request) {
|
||||||
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
accountId := mux.Vars(r)["accountId"]
|
accountId := mux.Vars(r)["accountId"]
|
||||||
|
|
||||||
req := cloudintegrations.UpdateAccountConfigRequest{}
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
if err != nil {
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetAccountStatus(r.Context(), claims.OrgID, accountId)
|
||||||
if errv2 != nil {
|
if err != nil {
|
||||||
render.Error(w, errv2)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
|
render.Success(w, http.StatusOK, resp)
|
||||||
r.Context(), claims.OrgID, cloudProvider, accountId, req,
|
|
||||||
)
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
aH.Respond(w, result)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
|
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(w http.ResponseWriter, r *http.Request) {
|
||||||
w http.ResponseWriter, r *http.Request,
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
accountId := mux.Vars(r)["accountId"]
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
if errv2 != nil {
|
if err != nil {
|
||||||
render.Error(w, errv2)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
|
req := new(integrationtypes.PostableAgentCheckInPayload)
|
||||||
r.Context(), claims.OrgID, cloudProvider, accountId,
|
if err = json.NewDecoder(r.Body).Decode(req); err != nil {
|
||||||
)
|
render.Error(w, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid request body"))
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
aH.Respond(w, result)
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
req.OrgID = claims.OrgID
|
||||||
|
|
||||||
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].AgentCheckIn(r.Context(), req)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsListServices(
|
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(w http.ResponseWriter, r *http.Request) {
|
||||||
w http.ResponseWriter, r *http.Request,
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
|
|
||||||
var cloudAccountId *string
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
cloudAccountIdQP := r.URL.Query().Get("cloud_account_id")
|
render.Error(w, err)
|
||||||
if len(cloudAccountIdQP) > 0 {
|
|
||||||
cloudAccountId = &cloudAccountIdQP
|
|
||||||
}
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
|
||||||
if errv2 != nil {
|
|
||||||
render.Error(w, errv2)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, apiErr := aH.CloudIntegrationsController.ListServices(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider, cloudAccountId,
|
|
||||||
)
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
aH.Respond(w, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
|
|
||||||
w http.ResponseWriter, r *http.Request,
|
|
||||||
) {
|
|
||||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
render.Error(w, err)
|
render.Error(w, err)
|
||||||
@@ -3718,7 +3676,100 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
accountId := mux.Vars(r)["accountId"]
|
||||||
|
|
||||||
|
reqBody, err := io.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateAccountConfig(r.Context(), orgID, accountId, reqBody)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(w http.ResponseWriter, r *http.Request) {
|
||||||
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
accountId := mux.Vars(r)["accountId"]
|
||||||
|
|
||||||
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := aH.cloudIntegrationsRegistry[cloudProvider].DisconnectAccount(r.Context(), claims.OrgID, accountId)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aH *APIHandler) CloudIntegrationsListServices(w http.ResponseWriter, r *http.Request) {
|
||||||
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var cloudAccountId *string
|
||||||
|
|
||||||
|
cloudAccountIdQP := r.URL.Query().Get("cloud_account_id")
|
||||||
|
if len(cloudAccountIdQP) > 0 {
|
||||||
|
cloudAccountId = &cloudAccountIdQP
|
||||||
|
}
|
||||||
|
|
||||||
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListServices(r.Context(), claims.OrgID, cloudAccountId)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(w http.ResponseWriter, r *http.Request) {
|
||||||
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
|
|
||||||
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
serviceId := mux.Vars(r)["serviceId"]
|
serviceId := mux.Vars(r)["serviceId"]
|
||||||
|
|
||||||
var cloudAccountId *string
|
var cloudAccountId *string
|
||||||
@@ -3728,270 +3779,58 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
|
|||||||
cloudAccountId = &cloudAccountIdQP
|
cloudAccountId = &cloudAccountIdQP
|
||||||
}
|
}
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetServiceDetails(r.Context(), &integrationtypes.GetServiceDetailsReq{
|
||||||
if errv2 != nil {
|
OrgID: orgID,
|
||||||
render.Error(w, errv2)
|
ServiceId: serviceId,
|
||||||
return
|
CloudAccountID: cloudAccountId,
|
||||||
}
|
})
|
||||||
|
|
||||||
resp, err := aH.CloudIntegrationsController.GetServiceDetails(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider, serviceId, cloudAccountId,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
render.Error(w, err)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add connection status for the 2 signals.
|
render.Success(w, http.StatusOK, resp)
|
||||||
if cloudAccountId != nil {
|
|
||||||
connStatus, apiErr := aH.calculateCloudIntegrationServiceConnectionStatus(
|
|
||||||
r.Context(), orgID, cloudProvider, *cloudAccountId, resp,
|
|
||||||
)
|
|
||||||
if apiErr != nil {
|
|
||||||
RespondError(w, apiErr, nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resp.ConnectionStatus = connStatus
|
|
||||||
}
|
|
||||||
|
|
||||||
aH.Respond(w, resp)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
|
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx context.Context,
|
cloudProviderString := mux.Vars(r)["cloudProvider"]
|
||||||
orgID valuer.UUID,
|
|
||||||
cloudProvider string,
|
|
||||||
cloudAccountId string,
|
|
||||||
svcDetails *cloudintegrations.ServiceDetails,
|
|
||||||
) (*cloudintegrations.ServiceConnectionStatus, *model.ApiError) {
|
|
||||||
if cloudProvider != "aws" {
|
|
||||||
// TODO(Raj): Make connection check generic for all providers in a follow up change
|
|
||||||
return nil, model.BadRequest(
|
|
||||||
fmt.Errorf("unsupported cloud provider: %s", cloudProvider),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
telemetryCollectionStrategy := svcDetails.Strategy
|
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
|
||||||
if telemetryCollectionStrategy == nil {
|
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
|
||||||
"service doesn't have telemetry collection strategy: %s", svcDetails.Id,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
result := &cloudintegrations.ServiceConnectionStatus{}
|
|
||||||
errors := []*model.ApiError{}
|
|
||||||
var resultLock sync.Mutex
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
// Calculate metrics connection status
|
|
||||||
if telemetryCollectionStrategy.AWSMetrics != nil {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
metricsConnStatus, apiErr := aH.calculateAWSIntegrationSvcMetricsConnectionStatus(
|
|
||||||
ctx, cloudAccountId, telemetryCollectionStrategy.AWSMetrics, svcDetails.DataCollected.Metrics,
|
|
||||||
)
|
|
||||||
|
|
||||||
resultLock.Lock()
|
|
||||||
defer resultLock.Unlock()
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
errors = append(errors, apiErr)
|
|
||||||
} else {
|
|
||||||
result.Metrics = metricsConnStatus
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate logs connection status
|
|
||||||
if telemetryCollectionStrategy.AWSLogs != nil {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
logsConnStatus, apiErr := aH.calculateAWSIntegrationSvcLogsConnectionStatus(
|
|
||||||
ctx, orgID, cloudAccountId, telemetryCollectionStrategy.AWSLogs,
|
|
||||||
)
|
|
||||||
|
|
||||||
resultLock.Lock()
|
|
||||||
defer resultLock.Unlock()
|
|
||||||
|
|
||||||
if apiErr != nil {
|
|
||||||
errors = append(errors, apiErr)
|
|
||||||
} else {
|
|
||||||
result.Logs = logsConnStatus
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
if len(errors) > 0 {
|
|
||||||
return nil, errors[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
|
|
||||||
}
|
|
||||||
func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
|
|
||||||
ctx context.Context,
|
|
||||||
cloudAccountId string,
|
|
||||||
strategy *services.AWSMetricsStrategy,
|
|
||||||
metricsCollectedBySvc []services.CollectedMetric,
|
|
||||||
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
|
|
||||||
if strategy == nil || len(strategy.StreamFilters) < 1 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedLabelValues := map[string]string{
|
|
||||||
"cloud_provider": "aws",
|
|
||||||
"cloud_account_id": cloudAccountId,
|
|
||||||
}
|
|
||||||
|
|
||||||
metricsNamespace := strategy.StreamFilters[0].Namespace
|
|
||||||
metricsNamespaceParts := strings.Split(metricsNamespace, "/")
|
|
||||||
|
|
||||||
if len(metricsNamespaceParts) >= 2 {
|
|
||||||
expectedLabelValues["service_namespace"] = metricsNamespaceParts[0]
|
|
||||||
expectedLabelValues["service_name"] = metricsNamespaceParts[1]
|
|
||||||
} else {
|
|
||||||
// metrics for single word namespaces like "CWAgent" do not
|
|
||||||
// have the service_namespace label populated
|
|
||||||
expectedLabelValues["service_name"] = metricsNamespaceParts[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
metricNamesCollectedBySvc := []string{}
|
|
||||||
for _, cm := range metricsCollectedBySvc {
|
|
||||||
metricNamesCollectedBySvc = append(metricNamesCollectedBySvc, cm.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
statusForLastReceivedMetric, apiErr := aH.reader.GetLatestReceivedMetric(
|
|
||||||
ctx, metricNamesCollectedBySvc, expectedLabelValues,
|
|
||||||
)
|
|
||||||
if apiErr != nil {
|
|
||||||
return nil, apiErr
|
|
||||||
}
|
|
||||||
|
|
||||||
if statusForLastReceivedMetric != nil {
|
|
||||||
return &cloudintegrations.SignalConnectionStatus{
|
|
||||||
LastReceivedTsMillis: statusForLastReceivedMetric.LastReceivedTsMillis,
|
|
||||||
LastReceivedFrom: "signoz-aws-integration",
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
|
|
||||||
ctx context.Context,
|
|
||||||
orgID valuer.UUID,
|
|
||||||
cloudAccountId string,
|
|
||||||
strategy *services.AWSLogsStrategy,
|
|
||||||
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
|
|
||||||
if strategy == nil || len(strategy.Subscriptions) < 1 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
logGroupNamePrefix := strategy.Subscriptions[0].LogGroupNamePrefix
|
|
||||||
if len(logGroupNamePrefix) < 1 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
logsConnTestFilter := &v3.FilterSet{
|
|
||||||
Operator: "AND",
|
|
||||||
Items: []v3.FilterItem{
|
|
||||||
{
|
|
||||||
Key: v3.AttributeKey{
|
|
||||||
Key: "cloud.account.id",
|
|
||||||
DataType: v3.AttributeKeyDataTypeString,
|
|
||||||
Type: v3.AttributeKeyTypeResource,
|
|
||||||
},
|
|
||||||
Operator: "=",
|
|
||||||
Value: cloudAccountId,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: v3.AttributeKey{
|
|
||||||
Key: "aws.cloudwatch.log_group_name",
|
|
||||||
DataType: v3.AttributeKeyDataTypeString,
|
|
||||||
Type: v3.AttributeKeyTypeResource,
|
|
||||||
},
|
|
||||||
Operator: "like",
|
|
||||||
Value: logGroupNamePrefix + "%",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(Raj): Receive this as a param from UI in the future.
|
|
||||||
lookbackSeconds := int64(30 * 60)
|
|
||||||
|
|
||||||
qrParams := &v3.QueryRangeParamsV3{
|
|
||||||
Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
|
|
||||||
End: time.Now().UnixMilli(),
|
|
||||||
CompositeQuery: &v3.CompositeQuery{
|
|
||||||
PanelType: v3.PanelTypeList,
|
|
||||||
QueryType: v3.QueryTypeBuilder,
|
|
||||||
BuilderQueries: map[string]*v3.BuilderQuery{
|
|
||||||
"A": {
|
|
||||||
PageSize: 1,
|
|
||||||
Filters: logsConnTestFilter,
|
|
||||||
QueryName: "A",
|
|
||||||
DataSource: v3.DataSourceLogs,
|
|
||||||
Expression: "A",
|
|
||||||
AggregateOperator: v3.AggregateOperatorNoOp,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
queryRes, _, err := aH.querier.QueryRange(
|
|
||||||
ctx, orgID, qrParams,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, model.InternalError(fmt.Errorf(
|
render.Error(w, err)
|
||||||
"could not query for integration connection status: %w", err,
|
return
|
||||||
))
|
|
||||||
}
|
|
||||||
if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
|
|
||||||
lastLog := queryRes[0].List[0]
|
|
||||||
|
|
||||||
return &cloudintegrations.SignalConnectionStatus{
|
|
||||||
LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
|
|
||||||
LastReceivedFrom: "signoz-aws-integration",
|
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
|
|
||||||
w http.ResponseWriter, r *http.Request,
|
|
||||||
) {
|
|
||||||
cloudProvider := mux.Vars(r)["cloudProvider"]
|
|
||||||
serviceId := mux.Vars(r)["serviceId"]
|
serviceId := mux.Vars(r)["serviceId"]
|
||||||
|
|
||||||
req := cloudintegrations.UpdateServiceConfigRequest{}
|
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
||||||
RespondError(w, model.BadRequest(err), nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
|
|
||||||
if errv2 != nil {
|
|
||||||
render.Error(w, errv2)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := aH.CloudIntegrationsController.UpdateServiceConfig(
|
|
||||||
r.Context(), claims.OrgID, cloudProvider, serviceId, &req,
|
|
||||||
)
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
render.Error(w, err)
|
render.Error(w, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
aH.Respond(w, result)
|
orgID, err := valuer.NewUUID(claims.OrgID)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
reqBody, err := io.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, errors.WrapInternalf(err,
|
||||||
|
errors.CodeInternal,
|
||||||
|
"failed to read update service config request body",
|
||||||
|
))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateServiceConfig(r.Context(), serviceId, orgID, reqBody)
|
||||||
|
if err != nil {
|
||||||
|
render.Error(w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
render.Success(w, http.StatusOK, result)
|
||||||
}
|
}
|
||||||
|
|
||||||
// logs
|
// logs
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
|
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
|
||||||
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
|
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
@@ -107,7 +108,7 @@ type IntegrationsListItem struct {
|
|||||||
|
|
||||||
type Integration struct {
|
type Integration struct {
|
||||||
IntegrationDetails
|
IntegrationDetails
|
||||||
Installation *types.InstalledIntegration `json:"installation"`
|
Installation *integrationtypes.InstalledIntegration `json:"installation"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Manager struct {
|
type Manager struct {
|
||||||
@@ -223,7 +224,7 @@ func (m *Manager) InstallIntegration(
|
|||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
integrationId string,
|
integrationId string,
|
||||||
config types.InstalledIntegrationConfig,
|
config integrationtypes.InstalledIntegrationConfig,
|
||||||
) (*IntegrationsListItem, *model.ApiError) {
|
) (*IntegrationsListItem, *model.ApiError) {
|
||||||
integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
|
integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
|
||||||
if apiErr != nil {
|
if apiErr != nil {
|
||||||
@@ -429,7 +430,7 @@ func (m *Manager) getInstalledIntegration(
|
|||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
integrationId string,
|
integrationId string,
|
||||||
) (*types.InstalledIntegration, *model.ApiError) {
|
) (*integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||||
iis, apiErr := m.installedIntegrationsRepo.get(
|
iis, apiErr := m.installedIntegrationsRepo.get(
|
||||||
ctx, orgId, []string{integrationId},
|
ctx, orgId, []string{integrationId},
|
||||||
)
|
)
|
||||||
@@ -457,7 +458,7 @@ func (m *Manager) getInstalledIntegrations(
|
|||||||
return nil, apiErr
|
return nil, apiErr
|
||||||
}
|
}
|
||||||
|
|
||||||
installedTypes := utils.MapSlice(installations, func(i types.InstalledIntegration) string {
|
installedTypes := utils.MapSlice(installations, func(i integrationtypes.InstalledIntegration) string {
|
||||||
return i.Type
|
return i.Type
|
||||||
})
|
})
|
||||||
integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)
|
integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)
|
||||||
|
|||||||
@@ -4,22 +4,22 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
)
|
)
|
||||||
|
|
||||||
type InstalledIntegrationsRepo interface {
|
type InstalledIntegrationsRepo interface {
|
||||||
list(ctx context.Context, orgId string) ([]types.InstalledIntegration, *model.ApiError)
|
list(ctx context.Context, orgId string) ([]integrationtypes.InstalledIntegration, *model.ApiError)
|
||||||
|
|
||||||
get(
|
get(
|
||||||
ctx context.Context, orgId string, integrationTypes []string,
|
ctx context.Context, orgId string, integrationTypes []string,
|
||||||
) (map[string]types.InstalledIntegration, *model.ApiError)
|
) (map[string]integrationtypes.InstalledIntegration, *model.ApiError)
|
||||||
|
|
||||||
upsert(
|
upsert(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
integrationType string,
|
integrationType string,
|
||||||
config types.InstalledIntegrationConfig,
|
config integrationtypes.InstalledIntegrationConfig,
|
||||||
) (*types.InstalledIntegration, *model.ApiError)
|
) (*integrationtypes.InstalledIntegration, *model.ApiError)
|
||||||
|
|
||||||
delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
|
delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
"github.com/SigNoz/signoz/pkg/types"
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
|
||||||
"github.com/SigNoz/signoz/pkg/valuer"
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
"github.com/uptrace/bun"
|
"github.com/uptrace/bun"
|
||||||
)
|
)
|
||||||
@@ -26,8 +27,8 @@ func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
|
|||||||
func (r *InstalledIntegrationsSqliteRepo) list(
|
func (r *InstalledIntegrationsSqliteRepo) list(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
) ([]types.InstalledIntegration, *model.ApiError) {
|
) ([]integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||||
integrations := []types.InstalledIntegration{}
|
integrations := []integrationtypes.InstalledIntegration{}
|
||||||
|
|
||||||
err := r.store.BunDB().NewSelect().
|
err := r.store.BunDB().NewSelect().
|
||||||
Model(&integrations).
|
Model(&integrations).
|
||||||
@@ -44,8 +45,8 @@ func (r *InstalledIntegrationsSqliteRepo) list(
|
|||||||
|
|
||||||
func (r *InstalledIntegrationsSqliteRepo) get(
|
func (r *InstalledIntegrationsSqliteRepo) get(
|
||||||
ctx context.Context, orgId string, integrationTypes []string,
|
ctx context.Context, orgId string, integrationTypes []string,
|
||||||
) (map[string]types.InstalledIntegration, *model.ApiError) {
|
) (map[string]integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||||
integrations := []types.InstalledIntegration{}
|
integrations := []integrationtypes.InstalledIntegration{}
|
||||||
|
|
||||||
typeValues := []interface{}{}
|
typeValues := []interface{}{}
|
||||||
for _, integrationType := range integrationTypes {
|
for _, integrationType := range integrationTypes {
|
||||||
@@ -62,7 +63,7 @@ func (r *InstalledIntegrationsSqliteRepo) get(
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
result := map[string]types.InstalledIntegration{}
|
result := map[string]integrationtypes.InstalledIntegration{}
|
||||||
for _, ii := range integrations {
|
for _, ii := range integrations {
|
||||||
result[ii.Type] = ii
|
result[ii.Type] = ii
|
||||||
}
|
}
|
||||||
@@ -74,10 +75,10 @@ func (r *InstalledIntegrationsSqliteRepo) upsert(
|
|||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
orgId string,
|
orgId string,
|
||||||
integrationType string,
|
integrationType string,
|
||||||
config types.InstalledIntegrationConfig,
|
config integrationtypes.InstalledIntegrationConfig,
|
||||||
) (*types.InstalledIntegration, *model.ApiError) {
|
) (*integrationtypes.InstalledIntegration, *model.ApiError) {
|
||||||
|
|
||||||
integration := types.InstalledIntegration{
|
integration := integrationtypes.InstalledIntegration{
|
||||||
Identifiable: types.Identifiable{
|
Identifiable: types.Identifiable{
|
||||||
ID: valuer.GenerateUUID(),
|
ID: valuer.GenerateUUID(),
|
||||||
},
|
},
|
||||||
@@ -114,7 +115,7 @@ func (r *InstalledIntegrationsSqliteRepo) delete(
|
|||||||
ctx context.Context, orgId string, integrationType string,
|
ctx context.Context, orgId string, integrationType string,
|
||||||
) *model.ApiError {
|
) *model.ApiError {
|
||||||
_, dbErr := r.store.BunDB().NewDelete().
|
_, dbErr := r.store.BunDB().NewDelete().
|
||||||
Model(&types.InstalledIntegration{}).
|
Model(&integrationtypes.InstalledIntegration{}).
|
||||||
Where("type = ?", integrationType).
|
Where("type = ?", integrationType).
|
||||||
Where("org_id = ?", orgId).
|
Where("org_id = ?", orgId).
|
||||||
Exec(ctx)
|
Exec(ctx)
|
||||||
|
|||||||
@@ -25,7 +25,6 @@ import (
|
|||||||
"github.com/SigNoz/signoz/pkg/querier"
|
"github.com/SigNoz/signoz/pkg/querier"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
|
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
|
||||||
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
|
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
|
||||||
@@ -70,11 +69,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
|
cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
|
||||||
Provider: "memory",
|
Provider: "memory",
|
||||||
Memory: cache.Memory{
|
Memory: cache.Memory{
|
||||||
@@ -126,13 +120,13 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
|
|||||||
Reader: reader,
|
Reader: reader,
|
||||||
RuleManager: rm,
|
RuleManager: rm,
|
||||||
IntegrationsController: integrationsController,
|
IntegrationsController: integrationsController,
|
||||||
CloudIntegrationsController: cloudIntegrationsController,
|
|
||||||
LogsParsingPipelineController: logParsingPipelineController,
|
LogsParsingPipelineController: logParsingPipelineController,
|
||||||
FluxInterval: config.Querier.FluxInterval,
|
FluxInterval: config.Querier.FluxInterval,
|
||||||
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
|
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
|
||||||
LicensingAPI: nooplicensing.NewLicenseAPI(),
|
LicensingAPI: nooplicensing.NewLicenseAPI(),
|
||||||
Signoz: signoz,
|
Signoz: signoz,
|
||||||
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
|
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
|
||||||
|
Logger: signoz.Instrumentation.Logger(),
|
||||||
}, config)
|
}, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
13
pkg/query-service/utils/recovery.go
Normal file
13
pkg/query-service/utils/recovery.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"runtime/debug"
|
||||||
|
)
|
||||||
|
|
||||||
|
func RecoverPanic(callback func(err interface{}, stack []byte)) {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
if callback != nil {
|
||||||
|
callback(r, debug.Stack())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -169,6 +169,7 @@ func NewSQLMigrationProviderFactories(
|
|||||||
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
|
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
|
||||||
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
|
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
|
||||||
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
|
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
|
||||||
|
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
209
pkg/sqlmigration/066_migrate_rules_v4_to_v5_post_deprecation.go
Normal file
209
pkg/sqlmigration/066_migrate_rules_v4_to_v5_post_deprecation.go
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
package sqlmigration
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
"log/slog"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/factory"
|
||||||
|
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||||
|
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||||
|
"github.com/SigNoz/signoz/pkg/transition"
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
"github.com/uptrace/bun/migrate"
|
||||||
|
)
|
||||||
|
|
||||||
|
type migrateRulesV4ToV5 struct {
|
||||||
|
store sqlstore.SQLStore
|
||||||
|
telemetryStore telemetrystore.TelemetryStore
|
||||||
|
logger *slog.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMigrateRulesV4ToV5Factory(
|
||||||
|
store sqlstore.SQLStore,
|
||||||
|
telemetryStore telemetrystore.TelemetryStore,
|
||||||
|
) factory.ProviderFactory[SQLMigration, Config] {
|
||||||
|
return factory.NewProviderFactory(
|
||||||
|
factory.MustNewName("migrate_rules_post_deprecation"),
|
||||||
|
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
|
||||||
|
return &migrateRulesV4ToV5{
|
||||||
|
store: store,
|
||||||
|
telemetryStore: telemetryStore,
|
||||||
|
logger: ps.Logger,
|
||||||
|
}, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
|
||||||
|
if err := migrations.Register(migration.Up, migration.Down); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
|
||||||
|
query := `
|
||||||
|
SELECT name
|
||||||
|
FROM (
|
||||||
|
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
|
||||||
|
INTERSECT
|
||||||
|
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
|
||||||
|
)
|
||||||
|
ORDER BY name
|
||||||
|
`
|
||||||
|
|
||||||
|
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
|
||||||
|
if err != nil {
|
||||||
|
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var keys []string
|
||||||
|
for rows.Next() {
|
||||||
|
var key string
|
||||||
|
if err := rows.Scan(&key); err != nil {
|
||||||
|
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
|
||||||
|
query := `
|
||||||
|
SELECT tagKey
|
||||||
|
FROM signoz_traces.distributed_span_attributes_keys
|
||||||
|
WHERE tagType IN ('tag', 'resource')
|
||||||
|
GROUP BY tagKey
|
||||||
|
HAVING COUNT(DISTINCT tagType) > 1
|
||||||
|
ORDER BY tagKey
|
||||||
|
`
|
||||||
|
|
||||||
|
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
|
||||||
|
if err != nil {
|
||||||
|
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
|
||||||
|
var keys []string
|
||||||
|
for rows.Next() {
|
||||||
|
var key string
|
||||||
|
if err := rows.Scan(&key); err != nil {
|
||||||
|
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return keys, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
|
||||||
|
logsKeys, err := migration.getLogDuplicateKeys(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
tx, err := db.BeginTx(ctx, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
_ = tx.Rollback()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var rules []struct {
|
||||||
|
ID string `bun:"id"`
|
||||||
|
Data map[string]any `bun:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
err = tx.NewSelect().
|
||||||
|
Table("rule").
|
||||||
|
Column("id", "data").
|
||||||
|
Scan(ctx, &rules)
|
||||||
|
if err != nil {
|
||||||
|
if err == sql.ErrNoRows {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
|
||||||
|
|
||||||
|
count := 0
|
||||||
|
|
||||||
|
for _, rule := range rules {
|
||||||
|
version, _ := rule.Data["version"].(string)
|
||||||
|
|
||||||
|
if version == "v5" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if version == "" {
|
||||||
|
migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)
|
||||||
|
|
||||||
|
// Check if the queries envelope already exists and is non-empty
|
||||||
|
hasQueriesEnvelope := false
|
||||||
|
if condition, ok := rule.Data["condition"].(map[string]any); ok {
|
||||||
|
if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
|
||||||
|
if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
|
||||||
|
hasQueriesEnvelope = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasQueriesEnvelope {
|
||||||
|
// already has queries envelope, just bump version
|
||||||
|
// this is because user made a mistake of choosing version
|
||||||
|
migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
|
||||||
|
rule.Data["version"] = "v5"
|
||||||
|
} else {
|
||||||
|
// old format, run full migration
|
||||||
|
migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
|
||||||
|
updated := alertsMigrator.Migrate(ctx, rule.Data)
|
||||||
|
if !updated {
|
||||||
|
migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rule.Data["version"] = "v5"
|
||||||
|
}
|
||||||
|
|
||||||
|
dataJSON, err := json.Marshal(rule.Data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = tx.NewUpdate().
|
||||||
|
Table("rule").
|
||||||
|
Set("data = ?", string(dataJSON)).
|
||||||
|
Where("id = ?", rule.ID).
|
||||||
|
Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
if count != 0 {
|
||||||
|
migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tx.Commit()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
||||||
"github.com/SigNoz/signoz-otel-collector/utils"
|
"github.com/SigNoz/signoz-otel-collector/utils"
|
||||||
@@ -61,10 +62,15 @@ var (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
type fieldMapper struct {}
|
type fieldMapper struct {
|
||||||
|
evolutionMetadataStore qbtypes.KeyEvolutionMetadataStore
|
||||||
|
}
|
||||||
|
|
||||||
func NewFieldMapper() qbtypes.FieldMapper {
|
func NewFieldMapper(evolutionMetadataStore qbtypes.KeyEvolutionMetadataStore) qbtypes.FieldMapper {
|
||||||
return &fieldMapper{}
|
// this can take evolution metadata as an argument and store it in the field mapper
|
||||||
|
return &fieldMapper{
|
||||||
|
evolutionMetadataStore: evolutionMetadataStore,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
|
func (m *fieldMapper) getColumn(_ context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
|
||||||
switch key.FieldContext {
|
switch key.FieldContext {
|
||||||
@@ -150,12 +156,17 @@ func (m *fieldMapper) FieldFor(ctx context.Context, key *telemetrytypes.Telemetr
|
|||||||
default:
|
default:
|
||||||
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource/body context fields are supported for json columns, got %s", key.FieldContext.String)
|
return "", errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "only resource/body context fields are supported for json columns, got %s", key.FieldContext.String)
|
||||||
}
|
}
|
||||||
case schema.ColumnTypeEnumLowCardinality:
|
|
||||||
switch elementType := column.Type.(schema.LowCardinalityColumnType).ElementType; elementType.GetType() {
|
baseColumn := logsV2Columns["resources_string"]
|
||||||
case schema.ColumnTypeEnumString:
|
tsStartTime := time.Unix(0, int64(tsStart))
|
||||||
return column.Name, nil
|
|
||||||
default:
|
// Check all evolutions for this key to see if any were released after tsStart.
|
||||||
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for low cardinality column type %s", elementType)
|
// If so, it means the new column wasn't available yet at tsStart, so we need to check the old column.
|
||||||
|
evolutions := m.evolutionMetadataStore.Get(baseColumn.Name)
|
||||||
|
|
||||||
|
// restricting now to just one entry where we know we changes from map to json
|
||||||
|
if len(evolutions) > 0 && evolutions[0].ReleaseTime.After(tsStartTime) {
|
||||||
|
return fmt.Sprintf("%s.`%s`::String", column.Name, key.Name), nil
|
||||||
}
|
}
|
||||||
case schema.ColumnTypeEnumString,
|
case schema.ColumnTypeEnumString,
|
||||||
schema.ColumnTypeEnumUInt64, schema.ColumnTypeEnumUInt32, schema.ColumnTypeEnumUInt8:
|
schema.ColumnTypeEnumUInt64, schema.ColumnTypeEnumUInt32, schema.ColumnTypeEnumUInt8:
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package telemetrylogs
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
||||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||||
@@ -11,6 +12,38 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// mockKeyEvolutionMetadataStore is a mock implementation of KeyEvolutionMetadataStore for testing
|
||||||
|
type mockKeyEvolutionMetadataStore struct {
|
||||||
|
metadata map[string][]*qbtypes.KeyEvolutionMetadataKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMockKeyEvolutionMetadataStore() *mockKeyEvolutionMetadataStore {
|
||||||
|
return &mockKeyEvolutionMetadataStore{
|
||||||
|
metadata: make(map[string][]*qbtypes.KeyEvolutionMetadataKey),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockKeyEvolutionMetadataStore) Get(keyName string) []*qbtypes.KeyEvolutionMetadataKey {
|
||||||
|
if m.metadata == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
keys, exists := m.metadata[keyName]
|
||||||
|
if !exists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Return a copy to prevent external modification
|
||||||
|
result := make([]*qbtypes.KeyEvolutionMetadataKey, len(keys))
|
||||||
|
copy(result, keys)
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockKeyEvolutionMetadataStore) Add(keyName string, key *qbtypes.KeyEvolutionMetadataKey) {
|
||||||
|
if m.metadata == nil {
|
||||||
|
m.metadata = make(map[string][]*qbtypes.KeyEvolutionMetadataKey)
|
||||||
|
}
|
||||||
|
m.metadata[keyName] = append(m.metadata[keyName], key)
|
||||||
|
}
|
||||||
|
|
||||||
func TestGetColumn(t *testing.T) {
|
func TestGetColumn(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
@@ -164,7 +197,8 @@ func TestGetColumn(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
fm := NewFieldMapper()
|
mockStore := newMockKeyEvolutionMetadataStore()
|
||||||
|
fm := NewFieldMapper(mockStore)
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
@@ -189,45 +223,45 @@ func TestGetFieldKeyName(t *testing.T) {
|
|||||||
expectedResult string
|
expectedResult string
|
||||||
expectedError error
|
expectedError error
|
||||||
}{
|
}{
|
||||||
{
|
// {
|
||||||
name: "Simple column type - timestamp",
|
// name: "Simple column type - timestamp",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
Name: "timestamp",
|
// Name: "timestamp",
|
||||||
FieldContext: telemetrytypes.FieldContextLog,
|
// FieldContext: telemetrytypes.FieldContextLog,
|
||||||
},
|
// },
|
||||||
expectedResult: "timestamp",
|
// expectedResult: "timestamp",
|
||||||
expectedError: nil,
|
// expectedError: nil,
|
||||||
},
|
// },
|
||||||
{
|
// {
|
||||||
name: "Map column type - string attribute",
|
// name: "Map column type - string attribute",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
Name: "user.id",
|
// Name: "user.id",
|
||||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
// FieldContext: telemetrytypes.FieldContextAttribute,
|
||||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
// FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||||
},
|
// },
|
||||||
expectedResult: "attributes_string['user.id']",
|
// expectedResult: "attributes_string['user.id']",
|
||||||
expectedError: nil,
|
// expectedError: nil,
|
||||||
},
|
// },
|
||||||
{
|
// {
|
||||||
name: "Map column type - number attribute",
|
// name: "Map column type - number attribute",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
Name: "request.size",
|
// Name: "request.size",
|
||||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
// FieldContext: telemetrytypes.FieldContextAttribute,
|
||||||
FieldDataType: telemetrytypes.FieldDataTypeNumber,
|
// FieldDataType: telemetrytypes.FieldDataTypeNumber,
|
||||||
},
|
// },
|
||||||
expectedResult: "attributes_number['request.size']",
|
// expectedResult: "attributes_number['request.size']",
|
||||||
expectedError: nil,
|
// expectedError: nil,
|
||||||
},
|
// },
|
||||||
{
|
// {
|
||||||
name: "Map column type - bool attribute",
|
// name: "Map column type - bool attribute",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
Name: "request.success",
|
// Name: "request.success",
|
||||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
// FieldContext: telemetrytypes.FieldContextAttribute,
|
||||||
FieldDataType: telemetrytypes.FieldDataTypeBool,
|
// FieldDataType: telemetrytypes.FieldDataTypeBool,
|
||||||
},
|
// },
|
||||||
expectedResult: "attributes_bool['request.success']",
|
// expectedResult: "attributes_bool['request.success']",
|
||||||
expectedError: nil,
|
// expectedError: nil,
|
||||||
},
|
// },
|
||||||
{
|
{
|
||||||
name: "Map column type - resource attribute",
|
name: "Map column type - resource attribute",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
key: telemetrytypes.TelemetryFieldKey{
|
||||||
@@ -237,32 +271,168 @@ func TestGetFieldKeyName(t *testing.T) {
|
|||||||
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
|
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
|
||||||
expectedError: nil,
|
expectedError: nil,
|
||||||
},
|
},
|
||||||
{
|
// {
|
||||||
name: "Map column type - resource attribute - Materialized",
|
// name: "Map column type - resource attribute - Materialized",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
Name: "service.name",
|
// Name: "service.name",
|
||||||
FieldContext: telemetrytypes.FieldContextResource,
|
// FieldContext: telemetrytypes.FieldContextResource,
|
||||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
// FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||||
Materialized: true,
|
// Materialized: true,
|
||||||
},
|
// },
|
||||||
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, `resource_string_service$$name_exists`==true, `resource_string_service$$name`, NULL)",
|
// expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, `resource_string_service$$name_exists`==true, `resource_string_service$$name`, NULL)",
|
||||||
expectedError: nil,
|
// expectedError: nil,
|
||||||
},
|
// },
|
||||||
{
|
// {
|
||||||
name: "Non-existent column",
|
// name: "Map column type - resource attribute - json",
|
||||||
key: telemetrytypes.TelemetryFieldKey{
|
// tsStart: uint64(time.Now().Add(10 * time.Second).UnixNano()),
|
||||||
Name: "nonexistent_field",
|
// tsEnd: uint64(time.Now().Add(20 * time.Second).UnixNano()),
|
||||||
FieldContext: telemetrytypes.FieldContextLog,
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
},
|
// Name: "service.name",
|
||||||
expectedResult: "",
|
// FieldContext: telemetrytypes.FieldContextResource,
|
||||||
expectedError: qbtypes.ErrColumnNotFound,
|
// },
|
||||||
},
|
// expectedResult: "resource.`service.name`::String",
|
||||||
|
// expectedError: nil,
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// name: "Map column type - resource attribute - Materialized - json",
|
||||||
|
// tsStart: uint64(time.Now().Add(10 * time.Second).UnixNano()),
|
||||||
|
// tsEnd: uint64(time.Now().Add(20 * time.Second).UnixNano()),
|
||||||
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
|
// Name: "service.name",
|
||||||
|
// FieldContext: telemetrytypes.FieldContextResource,
|
||||||
|
// FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||||
|
// Materialized: true,
|
||||||
|
// },
|
||||||
|
// expectedResult: "resource.`service.name`::String",
|
||||||
|
// expectedError: nil,
|
||||||
|
// },
|
||||||
|
// {
|
||||||
|
// name: "Non-existent column",
|
||||||
|
// key: telemetrytypes.TelemetryFieldKey{
|
||||||
|
// Name: "nonexistent_field",
|
||||||
|
// FieldContext: telemetrytypes.FieldContextLog,
|
||||||
|
// },
|
||||||
|
// expectedResult: "",
|
||||||
|
// expectedError: qbtypes.ErrColumnNotFound,
|
||||||
|
// },
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
fm := NewFieldMapper()
|
mockStore := newMockKeyEvolutionMetadataStore()
|
||||||
result, err := fm.FieldFor(ctx, &tc.key)
|
fm := NewFieldMapper(mockStore)
|
||||||
|
result, err := fm.FieldFor(ctx, tc.tsStart, tc.tsEnd, &tc.key)
|
||||||
|
|
||||||
|
if tc.expectedError != nil {
|
||||||
|
assert.Equal(t, tc.expectedError, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, tc.expectedResult, result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFieldForWithEvolutionMetadata(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Create a test release time
|
||||||
|
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
|
||||||
|
releaseTimeNano := uint64(releaseTime.UnixNano())
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
tsStart uint64
|
||||||
|
tsEnd uint64
|
||||||
|
key telemetrytypes.TelemetryFieldKey
|
||||||
|
setupMock func(*mockKeyEvolutionMetadataStore)
|
||||||
|
expectedResult string
|
||||||
|
expectedError error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Resource attribute - tsStart before release time (use new JSON column only)",
|
||||||
|
tsStart: releaseTimeNano - uint64(24*time.Hour.Nanoseconds()), // 1 day before release
|
||||||
|
tsEnd: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
|
||||||
|
key: telemetrytypes.TelemetryFieldKey{
|
||||||
|
Name: "service.name",
|
||||||
|
FieldContext: telemetrytypes.FieldContextResource,
|
||||||
|
},
|
||||||
|
setupMock: func(m *mockKeyEvolutionMetadataStore) {
|
||||||
|
m.Add("resources_string", &qbtypes.KeyEvolutionMetadataKey{
|
||||||
|
BaseColumn: "resources_string",
|
||||||
|
BaseColumnType: "Map(LowCardinality(String), String)",
|
||||||
|
NewColumn: "resource",
|
||||||
|
NewColumnType: "JSON(max_dynamic_paths=100)",
|
||||||
|
ReleaseTime: releaseTime,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
expectedResult: "resource.`service.name`::String",
|
||||||
|
expectedError: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Resource attribute - tsStart after release time (use fallback with multiIf)",
|
||||||
|
tsStart: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()), // 1 day after release
|
||||||
|
tsEnd: releaseTimeNano + uint64(48*time.Hour.Nanoseconds()),
|
||||||
|
key: telemetrytypes.TelemetryFieldKey{
|
||||||
|
Name: "service.name",
|
||||||
|
FieldContext: telemetrytypes.FieldContextResource,
|
||||||
|
},
|
||||||
|
setupMock: func(m *mockKeyEvolutionMetadataStore) {
|
||||||
|
m.Add("resources_string", &qbtypes.KeyEvolutionMetadataKey{
|
||||||
|
BaseColumn: "resources_string",
|
||||||
|
BaseColumnType: "Map(LowCardinality(String), String)",
|
||||||
|
NewColumn: "resource",
|
||||||
|
NewColumnType: "JSON(max_dynamic_paths=100)",
|
||||||
|
ReleaseTime: releaseTime,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
|
||||||
|
expectedError: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Resource attribute - no evolution metadata (use fallback with multiIf)",
|
||||||
|
tsStart: releaseTimeNano,
|
||||||
|
tsEnd: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
|
||||||
|
key: telemetrytypes.TelemetryFieldKey{
|
||||||
|
Name: "service.name",
|
||||||
|
FieldContext: telemetrytypes.FieldContextResource,
|
||||||
|
},
|
||||||
|
setupMock: func(m *mockKeyEvolutionMetadataStore) {
|
||||||
|
// No metadata added - empty mock
|
||||||
|
},
|
||||||
|
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
|
||||||
|
expectedError: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Resource attribute - tsStart exactly at release time (use fallback with multiIf)",
|
||||||
|
tsStart: releaseTimeNano,
|
||||||
|
tsEnd: releaseTimeNano + uint64(24*time.Hour.Nanoseconds()),
|
||||||
|
key: telemetrytypes.TelemetryFieldKey{
|
||||||
|
Name: "service.name",
|
||||||
|
FieldContext: telemetrytypes.FieldContextResource,
|
||||||
|
},
|
||||||
|
setupMock: func(m *mockKeyEvolutionMetadataStore) {
|
||||||
|
m.Add("resources_string", &qbtypes.KeyEvolutionMetadataKey{
|
||||||
|
BaseColumn: "resources_string",
|
||||||
|
BaseColumnType: "Map(LowCardinality(String), String)",
|
||||||
|
NewColumn: "resource",
|
||||||
|
NewColumnType: "JSON(max_dynamic_paths=100)",
|
||||||
|
ReleaseTime: releaseTime,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
expectedResult: "multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL)",
|
||||||
|
expectedError: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
mockStore := newMockKeyEvolutionMetadataStore()
|
||||||
|
if tc.setupMock != nil {
|
||||||
|
tc.setupMock(mockStore)
|
||||||
|
}
|
||||||
|
fm := NewFieldMapper(mockStore)
|
||||||
|
result, err := fm.FieldFor(ctx, tc.tsStart, tc.tsEnd, &tc.key)
|
||||||
|
|
||||||
if tc.expectedError != nil {
|
if tc.expectedError != nil {
|
||||||
assert.Equal(t, tc.expectedError, err)
|
assert.Equal(t, tc.expectedError, err)
|
||||||
|
|||||||
@@ -10,7 +10,6 @@ import (
|
|||||||
"github.com/ClickHouse/clickhouse-go/v2/lib/chcol"
|
"github.com/ClickHouse/clickhouse-go/v2/lib/chcol"
|
||||||
schemamigrator "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
schemamigrator "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
||||||
"github.com/SigNoz/signoz-otel-collector/constants"
|
"github.com/SigNoz/signoz-otel-collector/constants"
|
||||||
"github.com/SigNoz/signoz-otel-collector/utils"
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
"github.com/SigNoz/signoz/pkg/querybuilder"
|
"github.com/SigNoz/signoz/pkg/querybuilder"
|
||||||
"github.com/SigNoz/signoz/pkg/telemetrylogs"
|
"github.com/SigNoz/signoz/pkg/telemetrylogs"
|
||||||
@@ -113,7 +112,7 @@ func (t *telemetryMetaStore) buildBodyJSONPaths(ctx context.Context,
|
|||||||
|
|
||||||
for _, fieldKey := range fieldKeys {
|
for _, fieldKey := range fieldKeys {
|
||||||
promotedKey := strings.Split(fieldKey.Name, telemetrytypes.ArraySep)[0]
|
promotedKey := strings.Split(fieldKey.Name, telemetrytypes.ArraySep)[0]
|
||||||
fieldKey.Materialized = promoted.Contains(promotedKey)
|
fieldKey.Materialized = promoted[promotedKey]
|
||||||
fieldKey.Indexes = indexes[fieldKey.Name]
|
fieldKey.Indexes = indexes[fieldKey.Name]
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -295,33 +294,6 @@ func (t *telemetryMetaStore) ListLogsJSONIndexes(ctx context.Context, filters ..
|
|||||||
return indexes, nil
|
return indexes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *telemetryMetaStore) ListPromotedPaths(ctx context.Context, paths ...string) (map[string]struct{}, error) {
|
|
||||||
sb := sqlbuilder.Select("path").From(fmt.Sprintf("%s.%s", DBName, PromotedPathsTableName))
|
|
||||||
pathConditions := []string{}
|
|
||||||
for _, path := range paths {
|
|
||||||
pathConditions = append(pathConditions, sb.Equal("path", path))
|
|
||||||
}
|
|
||||||
sb.Where(sb.Or(pathConditions...))
|
|
||||||
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
|
||||||
|
|
||||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WrapInternalf(err, CodeFailLoadPromotedPaths, "failed to load promoted paths")
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
next := make(map[string]struct{})
|
|
||||||
for rows.Next() {
|
|
||||||
var path string
|
|
||||||
if err := rows.Scan(&path); err != nil {
|
|
||||||
return nil, errors.WrapInternalf(err, CodeFailLoadPromotedPaths, "failed to scan promoted path")
|
|
||||||
}
|
|
||||||
next[path] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return next, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(Piyush): Remove this if not used in future
|
// TODO(Piyush): Remove this if not used in future
|
||||||
func (t *telemetryMetaStore) ListJSONValues(ctx context.Context, path string, limit int) (*telemetrytypes.TelemetryFieldValues, bool, error) {
|
func (t *telemetryMetaStore) ListJSONValues(ctx context.Context, path string, limit int) (*telemetrytypes.TelemetryFieldValues, bool, error) {
|
||||||
path = CleanPathPrefixes(path)
|
path = CleanPathPrefixes(path)
|
||||||
@@ -484,11 +456,12 @@ func derefValue(v any) any {
|
|||||||
return val.Interface()
|
return val.Interface()
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsPathPromoted checks if a specific path is promoted
|
// IsPathPromoted checks if a specific path is promoted (Column Evolution table: field_name for logs body).
|
||||||
func (t *telemetryMetaStore) IsPathPromoted(ctx context.Context, path string) (bool, error) {
|
func (t *telemetryMetaStore) IsPathPromoted(ctx context.Context, path string) (bool, error) {
|
||||||
split := strings.Split(path, telemetrytypes.ArraySep)
|
split := strings.Split(path, telemetrytypes.ArraySep)
|
||||||
query := fmt.Sprintf("SELECT 1 FROM %s.%s WHERE path = ? LIMIT 1", DBName, PromotedPathsTableName)
|
pathSegment := split[0]
|
||||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, split[0])
|
query := fmt.Sprintf("SELECT 1 FROM %s.%s WHERE signal = ? AND column_name = ? AND field_context = ? AND field_name = ? LIMIT 1", DBName, PromotedPathsTableName)
|
||||||
|
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, telemetrytypes.SignalLogs, telemetrylogs.LogsV2BodyPromotedColumn, telemetrytypes.FieldContextBody, pathSegment)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, errors.WrapInternalf(err, CodeFailCheckPathPromoted, "failed to check if path %s is promoted", path)
|
return false, errors.WrapInternalf(err, CodeFailCheckPathPromoted, "failed to check if path %s is promoted", path)
|
||||||
}
|
}
|
||||||
@@ -497,15 +470,24 @@ func (t *telemetryMetaStore) IsPathPromoted(ctx context.Context, path string) (b
|
|||||||
return rows.Next(), nil
|
return rows.Next(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPromotedPaths checks if a specific path is promoted
|
// GetPromotedPaths returns promoted paths from the Column Evolution table (field_name for logs body).
|
||||||
func (t *telemetryMetaStore) GetPromotedPaths(ctx context.Context, paths ...string) (*utils.ConcurrentSet[string], error) {
|
func (t *telemetryMetaStore) GetPromotedPaths(ctx context.Context, paths ...string) (map[string]bool, error) {
|
||||||
sb := sqlbuilder.Select("path").From(fmt.Sprintf("%s.%s", DBName, PromotedPathsTableName))
|
sb := sqlbuilder.Select("field_name").From(fmt.Sprintf("%s.%s", DBName, PromotedPathsTableName))
|
||||||
pathConditions := []string{}
|
conditions := []string{
|
||||||
for _, path := range paths {
|
sb.Equal("signal", telemetrytypes.SignalLogs),
|
||||||
split := strings.Split(path, telemetrytypes.ArraySep)
|
sb.Equal("column_name", telemetrylogs.LogsV2BodyPromotedColumn),
|
||||||
pathConditions = append(pathConditions, sb.Equal("path", split[0]))
|
sb.Equal("field_context", telemetrytypes.FieldContextBody),
|
||||||
|
sb.NotEqual("field_name", "__all__"),
|
||||||
}
|
}
|
||||||
sb.Where(sb.Or(pathConditions...))
|
if len(paths) > 0 {
|
||||||
|
pathArgs := make([]interface{}, len(paths))
|
||||||
|
for i, path := range paths {
|
||||||
|
split := strings.Split(path, telemetrytypes.ArraySep)
|
||||||
|
pathArgs[i] = split[0]
|
||||||
|
}
|
||||||
|
conditions = append(conditions, sb.In("field_name", pathArgs))
|
||||||
|
}
|
||||||
|
sb.Where(sb.And(conditions...))
|
||||||
|
|
||||||
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
|
||||||
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
|
||||||
@@ -514,13 +496,13 @@ func (t *telemetryMetaStore) GetPromotedPaths(ctx context.Context, paths ...stri
|
|||||||
}
|
}
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
promotedPaths := utils.NewConcurrentSet[string]()
|
promotedPaths := make(map[string]bool)
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var path string
|
var fieldName string
|
||||||
if err := rows.Scan(&path); err != nil {
|
if err := rows.Scan(&fieldName); err != nil {
|
||||||
return nil, errors.WrapInternalf(err, CodeFailCheckPathPromoted, "failed to scan promoted path")
|
return nil, errors.WrapInternalf(err, CodeFailCheckPathPromoted, "failed to scan promoted path")
|
||||||
}
|
}
|
||||||
promotedPaths.Insert(path)
|
promotedPaths[fieldName] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
return promotedPaths, nil
|
return promotedPaths, nil
|
||||||
@@ -534,21 +516,22 @@ func CleanPathPrefixes(path string) string {
|
|||||||
return path
|
return path
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PromotePaths inserts promoted paths into the Column Evolution table (same schema as signoz-otel-collector metadata_migrations).
|
||||||
func (t *telemetryMetaStore) PromotePaths(ctx context.Context, paths ...string) error {
|
func (t *telemetryMetaStore) PromotePaths(ctx context.Context, paths ...string) error {
|
||||||
batch, err := t.telemetrystore.ClickhouseDB().PrepareBatch(ctx,
|
batch, err := t.telemetrystore.ClickhouseDB().PrepareBatch(ctx,
|
||||||
fmt.Sprintf("INSERT INTO %s.%s (path, created_at) VALUES", DBName,
|
fmt.Sprintf("INSERT INTO %s.%s (signal, column_name, column_type, field_context, field_name, version, release_time) VALUES", DBName,
|
||||||
PromotedPathsTableName))
|
PromotedPathsTableName))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WrapInternalf(err, CodeFailedToPrepareBatch, "failed to prepare batch")
|
return errors.WrapInternalf(err, CodeFailedToPrepareBatch, "failed to prepare batch")
|
||||||
}
|
}
|
||||||
|
|
||||||
nowMs := uint64(time.Now().UnixMilli())
|
releaseTime := time.Now().UnixNano()
|
||||||
for _, p := range paths {
|
for _, p := range paths {
|
||||||
trimmed := strings.TrimSpace(p)
|
trimmed := strings.TrimSpace(p)
|
||||||
if trimmed == "" {
|
if trimmed == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := batch.Append(trimmed, nowMs); err != nil {
|
if err := batch.Append(telemetrytypes.SignalLogs, telemetrylogs.LogsV2BodyPromotedColumn, "JSON()", telemetrytypes.FieldContextBody, trimmed, 0, releaseTime); err != nil {
|
||||||
_ = batch.Abort()
|
_ = batch.Abort()
|
||||||
return errors.WrapInternalf(err, CodeFailedToAppendPath, "failed to append path")
|
return errors.WrapInternalf(err, CodeFailedToAppendPath, "failed to append path")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ const (
|
|||||||
AttributesMetadataTableName = "distributed_attributes_metadata"
|
AttributesMetadataTableName = "distributed_attributes_metadata"
|
||||||
AttributesMetadataLocalTableName = "attributes_metadata"
|
AttributesMetadataLocalTableName = "attributes_metadata"
|
||||||
PathTypesTableName = otelcollectorconst.DistributedPathTypesTable
|
PathTypesTableName = otelcollectorconst.DistributedPathTypesTable
|
||||||
PromotedPathsTableName = otelcollectorconst.DistributedPromotedPathsTable
|
// Column Evolution table stores promoted paths as (signal, column_name, field_context, field_name); see signoz-otel-collector metadata_migrations.
|
||||||
|
PromotedPathsTableName = "distributed_column_evolution_metadata"
|
||||||
SkipIndexTableName = "system.data_skipping_indices"
|
SkipIndexTableName = "system.data_skipping_indices"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,247 +0,0 @@
|
|||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql/driver"
|
|
||||||
"encoding/json"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
|
||||||
"github.com/uptrace/bun"
|
|
||||||
)
|
|
||||||
|
|
||||||
type IntegrationUserEmail string
|
|
||||||
|
|
||||||
const (
|
|
||||||
AWSIntegrationUserEmail IntegrationUserEmail = "aws-integration@signoz.io"
|
|
||||||
)
|
|
||||||
|
|
||||||
var AllIntegrationUserEmails = []IntegrationUserEmail{
|
|
||||||
AWSIntegrationUserEmail,
|
|
||||||
}
|
|
||||||
|
|
||||||
// --------------------------------------------------------------------------
|
|
||||||
// Normal integration uses just the installed_integration table
|
|
||||||
// --------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type InstalledIntegration struct {
|
|
||||||
bun.BaseModel `bun:"table:installed_integration"`
|
|
||||||
|
|
||||||
Identifiable
|
|
||||||
Type string `json:"type" bun:"type,type:text,unique:org_id_type"`
|
|
||||||
Config InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
|
|
||||||
InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
|
|
||||||
OrgID string `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type InstalledIntegrationConfig map[string]interface{}
|
|
||||||
|
|
||||||
// For serializing from db
|
|
||||||
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
|
|
||||||
var data []byte
|
|
||||||
switch v := src.(type) {
|
|
||||||
case []byte:
|
|
||||||
data = v
|
|
||||||
case string:
|
|
||||||
data = []byte(v)
|
|
||||||
default:
|
|
||||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Unmarshal(data, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing to db
|
|
||||||
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
|
|
||||||
filterSetJson, err := json.Marshal(c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not serialize integration config to JSON")
|
|
||||||
}
|
|
||||||
return filterSetJson, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// --------------------------------------------------------------------------
|
|
||||||
// Cloud integration uses the cloud_integration table
|
|
||||||
// and cloud_integrations_service table
|
|
||||||
// --------------------------------------------------------------------------
|
|
||||||
|
|
||||||
type CloudIntegration struct {
|
|
||||||
bun.BaseModel `bun:"table:cloud_integration"`
|
|
||||||
|
|
||||||
Identifiable
|
|
||||||
TimeAuditable
|
|
||||||
Provider string `json:"provider" bun:"provider,type:text,unique:provider_id"`
|
|
||||||
Config *AccountConfig `json:"config" bun:"config,type:text"`
|
|
||||||
AccountID *string `json:"account_id" bun:"account_id,type:text"`
|
|
||||||
LastAgentReport *AgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
|
|
||||||
RemovedAt *time.Time `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
|
|
||||||
OrgID string `bun:"org_id,type:text,unique:provider_id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *CloudIntegration) Status() AccountStatus {
|
|
||||||
status := AccountStatus{}
|
|
||||||
if a.LastAgentReport != nil {
|
|
||||||
lastHeartbeat := a.LastAgentReport.TimestampMillis
|
|
||||||
status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
|
|
||||||
}
|
|
||||||
return status
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *CloudIntegration) Account() Account {
|
|
||||||
ca := Account{Id: a.ID.StringValue(), Status: a.Status()}
|
|
||||||
|
|
||||||
if a.AccountID != nil {
|
|
||||||
ca.CloudAccountId = *a.AccountID
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.Config != nil {
|
|
||||||
ca.Config = *a.Config
|
|
||||||
} else {
|
|
||||||
ca.Config = DefaultAccountConfig()
|
|
||||||
}
|
|
||||||
return ca
|
|
||||||
}
|
|
||||||
|
|
||||||
type Account struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
CloudAccountId string `json:"cloud_account_id"`
|
|
||||||
Config AccountConfig `json:"config"`
|
|
||||||
Status AccountStatus `json:"status"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type AccountStatus struct {
|
|
||||||
Integration AccountIntegrationStatus `json:"integration"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type AccountIntegrationStatus struct {
|
|
||||||
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func DefaultAccountConfig() AccountConfig {
|
|
||||||
return AccountConfig{
|
|
||||||
EnabledRegions: []string{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type AccountConfig struct {
|
|
||||||
EnabledRegions []string `json:"regions"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing from db
|
|
||||||
func (c *AccountConfig) Scan(src any) error {
|
|
||||||
var data []byte
|
|
||||||
switch v := src.(type) {
|
|
||||||
case []byte:
|
|
||||||
data = v
|
|
||||||
case string:
|
|
||||||
data = []byte(v)
|
|
||||||
default:
|
|
||||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Unmarshal(data, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing to db
|
|
||||||
func (c *AccountConfig) Value() (driver.Value, error) {
|
|
||||||
if c == nil {
|
|
||||||
return nil, errors.NewInternalf(errors.CodeInternal, "cloud account config is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
serialized, err := json.Marshal(c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't serialize cloud account config to JSON")
|
|
||||||
}
|
|
||||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
|
||||||
return string(serialized), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type AgentReport struct {
|
|
||||||
TimestampMillis int64 `json:"timestamp_millis"`
|
|
||||||
Data map[string]any `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing from db
|
|
||||||
func (r *AgentReport) Scan(src any) error {
|
|
||||||
var data []byte
|
|
||||||
switch v := src.(type) {
|
|
||||||
case []byte:
|
|
||||||
data = v
|
|
||||||
case string:
|
|
||||||
data = []byte(v)
|
|
||||||
default:
|
|
||||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Unmarshal(data, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing to db
|
|
||||||
func (r *AgentReport) Value() (driver.Value, error) {
|
|
||||||
if r == nil {
|
|
||||||
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
serialized, err := json.Marshal(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WrapInternalf(
|
|
||||||
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
|
||||||
return string(serialized), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type CloudIntegrationService struct {
|
|
||||||
bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`
|
|
||||||
|
|
||||||
Identifiable
|
|
||||||
TimeAuditable
|
|
||||||
Type string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
|
|
||||||
Config CloudServiceConfig `bun:"config,type:text"`
|
|
||||||
CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CloudServiceLogsConfig struct {
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CloudServiceMetricsConfig struct {
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CloudServiceConfig struct {
|
|
||||||
Logs *CloudServiceLogsConfig `json:"logs,omitempty"`
|
|
||||||
Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing from db
|
|
||||||
func (c *CloudServiceConfig) Scan(src any) error {
|
|
||||||
var data []byte
|
|
||||||
switch src := src.(type) {
|
|
||||||
case []byte:
|
|
||||||
data = src
|
|
||||||
case string:
|
|
||||||
data = []byte(src)
|
|
||||||
default:
|
|
||||||
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
|
||||||
}
|
|
||||||
|
|
||||||
return json.Unmarshal(data, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// For serializing to db
|
|
||||||
func (c *CloudServiceConfig) Value() (driver.Value, error) {
|
|
||||||
if c == nil {
|
|
||||||
return nil, errors.NewInternalf(errors.CodeInternal, "cloud service config is nil")
|
|
||||||
}
|
|
||||||
|
|
||||||
serialized, err := json.Marshal(c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WrapInternalf(
|
|
||||||
err, errors.CodeInternal, "couldn't serialize cloud service config to JSON",
|
|
||||||
)
|
|
||||||
}
|
|
||||||
// Return as string instead of []byte to ensure PostgreSQL stores as text, not bytea
|
|
||||||
return string(serialized), nil
|
|
||||||
}
|
|
||||||
546
pkg/types/integrationtypes/cloudintegration.go
Normal file
546
pkg/types/integrationtypes/cloudintegration.go
Normal file
@@ -0,0 +1,546 @@
|
|||||||
|
package integrationtypes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE:
|
||||||
|
// - When Account keyword is used in struct names, it refers cloud integration account. CloudIntegration refers to DB schema.
|
||||||
|
// - When Account Config keyword is used in struct names, it refers to configuration for cloud integration accounts
|
||||||
|
// - When Service keyword is used in struct names, it refers to cloud integration service. CloudIntegrationService refers to DB schema.
|
||||||
|
// where `service` is services provided by each cloud provider like AWS S3, Azure BlobStorage etc.
|
||||||
|
// - When Service Config keyword is used in struct names, it refers to configuration for cloud integration services
|
||||||
|
|
||||||
|
// Generic utility functions for JSON serialization/deserialization
|
||||||
|
// this is helpful to return right errors from a common place and avoid repeating the same code in multiple places.
|
||||||
|
// UnmarshalJSON is a generic function to unmarshal JSON data into any type
|
||||||
|
func UnmarshalJSON[T any](src []byte, target *T) error {
|
||||||
|
err := json.Unmarshal(src, target)
|
||||||
|
if err != nil {
|
||||||
|
return errors.WrapInternalf(
|
||||||
|
err, errors.CodeInternal, "couldn't deserialize JSON",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON is a generic function to marshal any type to JSON
|
||||||
|
func MarshalJSON[T any](source *T) ([]byte, error) {
|
||||||
|
if source == nil {
|
||||||
|
return nil, errors.NewInternalf(errors.CodeInternal, "source is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
serialized, err := json.Marshal(source)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WrapInternalf(
|
||||||
|
err, errors.CodeInternal, "couldn't serialize to JSON",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return serialized, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloudProvider defines the interface to be implemented by different cloud providers.
|
||||||
|
// This is generic interface so it will be accepting and returning generic types instead of concrete.
|
||||||
|
// It's the cloud provider's responsibility to cast them to appropriate types and validate
|
||||||
|
type CloudProvider interface {
|
||||||
|
GetName() CloudProviderType
|
||||||
|
|
||||||
|
// AgentCheckIn is called by agent to heartbeat and get latest config in response.
|
||||||
|
AgentCheckIn(ctx context.Context, req *PostableAgentCheckInPayload) (any, error)
|
||||||
|
// GenerateConnectionArtifact generates cloud provider specific connection information, client side handles how this information is shown
|
||||||
|
GenerateConnectionArtifact(ctx context.Context, req *PostableConnectionArtifact) (any, error)
|
||||||
|
// GetAccountStatus returns agent connection status for a cloud integration account
|
||||||
|
GetAccountStatus(ctx context.Context, orgID, accountID string) (*GettableAccountStatus, error)
|
||||||
|
// ListConnectedAccounts lists accounts where agent is connected
|
||||||
|
ListConnectedAccounts(ctx context.Context, orgID string) (*GettableConnectedAccountsList, error)
|
||||||
|
|
||||||
|
// LIstServices return list of services for a cloud provider attached with the accountID. This just returns a summary
|
||||||
|
ListServices(ctx context.Context, orgID string, accountID *string) (any, error) // returns either GettableAWSServices or GettableAzureServices
|
||||||
|
// GetServiceDetails returns service definition details for a serviceId. This returns config and other details required to show in service details page on client.
|
||||||
|
GetServiceDetails(ctx context.Context, req *GetServiceDetailsReq) (any, error)
|
||||||
|
|
||||||
|
// GetDashboard returns dashboard json for a give cloud integration service dashboard.
|
||||||
|
// this only returns the dashboard when account is connected and service is enabled
|
||||||
|
GetDashboard(ctx context.Context, id string, orgID valuer.UUID) (*dashboardtypes.Dashboard, error)
|
||||||
|
// GetAvailableDashboards returns list of available dashboards across all connected cloud integration accounts in the org.
|
||||||
|
// this list gets added to dashboard list page
|
||||||
|
GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error)
|
||||||
|
|
||||||
|
// UpdateAccountConfig updates cloud integration account config
|
||||||
|
UpdateAccountConfig(ctx context.Context, orgId valuer.UUID, accountId string, config []byte) (any, error)
|
||||||
|
// UpdateServiceConfig updates cloud integration service config
|
||||||
|
UpdateServiceConfig(ctx context.Context, serviceId string, orgID valuer.UUID, config []byte) (any, error)
|
||||||
|
|
||||||
|
// DisconnectAccount soft deletes/removes a cloud integration account.
|
||||||
|
DisconnectAccount(ctx context.Context, orgID, accountID string) (*CloudIntegration, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GettableConnectedAccountsList is the response for listing connected accounts for a cloud provider.
|
||||||
|
type GettableConnectedAccountsList struct {
|
||||||
|
Accounts []*Account `json:"accounts"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SigNozAgentConfig represents parameters required for agent deployment in cloud provider accounts
|
||||||
|
// these represent parameters passed during agent deployment, how they are passed might change for each cloud provider but the purpose is same.
|
||||||
|
type SigNozAgentConfig struct {
|
||||||
|
Region string `json:"region,omitempty"` // AWS-specific: The region in which SigNoz agent should be installed
|
||||||
|
|
||||||
|
IngestionUrl string `json:"ingestion_url"`
|
||||||
|
IngestionKey string `json:"ingestion_key"`
|
||||||
|
SigNozAPIUrl string `json:"signoz_api_url"`
|
||||||
|
SigNozAPIKey string `json:"signoz_api_key"`
|
||||||
|
|
||||||
|
Version string `json:"version,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostableConnectionArtifact represent request body for generating connection artifact API.
|
||||||
|
// Data is request body raw bytes since each cloud provider will have have different request body structure and generics hardly help in such cases.
|
||||||
|
// Artifact is a generic name for different types of connection methods like connection URL for AWS, connection command for Azure etc.
|
||||||
|
type PostableConnectionArtifact struct {
|
||||||
|
OrgID string
|
||||||
|
Data []byte // either PostableAWSConnectionUrl or PostableAzureConnectionCommand
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostableAWSConnectionUrl is the request body for the AWS connection artifact API.
type PostableAWSConnectionUrl struct {
	AgentConfig   *SigNozAgentConfig `json:"agent_config"`
	AccountConfig *AWSAccountConfig  `json:"account_config"`
}

// PostableAzureConnectionCommand is the request body for the Azure connection artifact API.
type PostableAzureConnectionCommand struct {
	AgentConfig   *SigNozAgentConfig  `json:"agent_config"`
	AccountConfig *AzureAccountConfig `json:"account_config"`
}

// GettableAzureConnectionArtifact is the Azure-specific connection artifact which contains
// the connection commands for agent deployment.
type GettableAzureConnectionArtifact struct {
	AzureShellConnectionCommand string `json:"az_shell_connection_command"`
	AzureCliConnectionCommand   string `json:"az_cli_connection_command"`
}

// GettableAWSConnectionUrl is the AWS-specific connection artifact which contains
// the connection URL for agent deployment.
type GettableAWSConnectionUrl struct {
	AccountId     string `json:"account_id"`
	ConnectionUrl string `json:"connection_url"`
}

// GettableAzureConnectionCommand is the Azure-specific connection artifact which contains
// the account id and the connection commands for agent deployment.
type GettableAzureConnectionCommand struct {
	AccountId                   string `json:"account_id"`
	AzureShellConnectionCommand string `json:"az_shell_connection_command"`
	AzureCliConnectionCommand   string `json:"az_cli_connection_command"`
}
|
||||||
|
|
||||||
|
// GettableAccountStatus is the cloud integration account status response.
type GettableAccountStatus struct {
	Id             string        `json:"id"`
	CloudAccountId *string       `json:"cloud_account_id,omitempty"`
	Status         AccountStatus `json:"status"`
}

// PostableAgentCheckInPayload is the request body for the agent check-in API.
// It is used by the agent to send its heartbeat.
type PostableAgentCheckInPayload struct {
	ID        string `json:"account_id"`
	AccountID string `json:"cloud_account_id"`
	// Data carries arbitrary cloud-specific agent data.
	Data  map[string]any `json:"data,omitempty"`
	OrgID string         `json:"-"`
}
|
||||||
|
|
||||||
|
// AWSAgentIntegrationConfig is used by the agent for deploying infra to send telemetry to SigNoz.
type AWSAgentIntegrationConfig struct {
	EnabledRegions              []string               `json:"enabled_regions"`
	TelemetryCollectionStrategy *AWSCollectionStrategy `json:"telemetry,omitempty"`
}

// AzureAgentIntegrationConfig is used by the agent for deploying infra to send telemetry to SigNoz.
type AzureAgentIntegrationConfig struct {
	DeploymentRegion      string   `json:"deployment_region"` // will not be changed once set
	EnabledResourceGroups []string `json:"resource_groups"`
	// TelemetryCollectionStrategy is a map of service to telemetry config.
	TelemetryCollectionStrategy map[string]*AzureCollectionStrategy `json:"telemetry,omitempty"`
}
|
||||||
|
|
||||||
|
// GettableAgentCheckInRes is the generic response from the agent check-in API.
// AWSAgentIntegrationConfig and AzureAgentIntegrationConfig are the configs used by the
// agent to deploy the infra and send telemetry to SigNoz.
type GettableAgentCheckInRes[AgentConfigT any] struct {
	AccountId         string       `json:"account_id"`
	CloudAccountId    string       `json:"cloud_account_id"`
	RemovedAt         *time.Time   `json:"removed_at"`
	IntegrationConfig AgentConfigT `json:"integration_config"`
}

// UpdatableServiceConfig is the generic request body for updating a service config.
type UpdatableServiceConfig[ServiceConfigT any] struct {
	CloudAccountId string         `json:"cloud_account_id"`
	Config         ServiceConfigT `json:"config"`
}

// ServiceConfigTyped is a generic interface for a cloud integration service's configuration.
// It defines helper functions for the CloudIntegrationService.Config field.
type ServiceConfigTyped[definition Definition] interface {
	Validate(def definition) error
	IsMetricsEnabled() bool
	IsLogsEnabled() bool
}
|
||||||
|
|
||||||
|
// AWSServiceConfig holds the per-service integration configuration for AWS,
// covering the logs and metrics signals.
type AWSServiceConfig struct {
	Logs    *AWSServiceLogsConfig    `json:"logs,omitempty"`
	Metrics *AWSServiceMetricsConfig `json:"metrics,omitempty"`
}

// AWSServiceLogsConfig configures log collection for an AWS service.
type AWSServiceLogsConfig struct {
	Enabled bool `json:"enabled"`
	// S3Buckets maps a region to the buckets whose logs should be synced.
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

// AWSServiceMetricsConfig configures metric collection for an AWS service.
type AWSServiceMetricsConfig struct {
	Enabled bool `json:"enabled"`
}

// IsMetricsEnabled returns true if metrics collection is configured and enabled.
func (a *AWSServiceConfig) IsMetricsEnabled() bool {
	if m := a.Metrics; m != nil {
		return m.Enabled
	}
	return false
}

// IsLogsEnabled returns true if logs collection is configured and enabled.
func (a *AWSServiceConfig) IsLogsEnabled() bool {
	if l := a.Logs; l != nil {
		return l.Enabled
	}
	return false
}
|
||||||
|
|
||||||
|
// AzureServiceConfig holds the per-service integration configuration for Azure,
// covering the logs and metrics signals. Each signal is a list of per-category entries.
type AzureServiceConfig struct {
	Logs    []*AzureServiceLogsConfig    `json:"logs,omitempty"`
	Metrics []*AzureServiceMetricsConfig `json:"metrics,omitempty"`
}

// AzureServiceLogsConfig is the Azure-specific service config for a single log category.
type AzureServiceLogsConfig struct {
	Enabled bool   `json:"enabled"`
	Name    string `json:"name"`
}

// AzureServiceMetricsConfig is the Azure-specific service config for a single metric category.
type AzureServiceMetricsConfig struct {
	Enabled bool   `json:"enabled"`
	Name    string `json:"name"`
}

// IsMetricsEnabled returns true if any metric category is configured and enabled.
func (a *AzureServiceConfig) IsMetricsEnabled() bool {
	// Ranging over a nil slice is a no-op, so no explicit nil check is needed.
	for _, m := range a.Metrics {
		if m.Enabled {
			return true
		}
	}
	return false
}

// IsLogsEnabled returns true if any log category is configured and enabled.
func (a *AzureServiceConfig) IsLogsEnabled() bool {
	for _, l := range a.Logs {
		if l.Enabled {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
func (a *AWSServiceConfig) Validate(def *AWSDefinition) error {
|
||||||
|
if def.Id != S3Sync && a.Logs != nil && a.Logs.S3Buckets != nil {
|
||||||
|
return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", S3Sync)
|
||||||
|
} else if def.Id == S3Sync && a.Logs != nil && a.Logs.S3Buckets != nil {
|
||||||
|
for region := range a.Logs.S3Buckets {
|
||||||
|
if _, found := ValidAWSRegions[region]; !found {
|
||||||
|
return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *AzureServiceConfig) Validate(def *AzureDefinition) error {
|
||||||
|
logsMap := make(map[string]bool)
|
||||||
|
metricsMap := make(map[string]bool)
|
||||||
|
|
||||||
|
if def.Strategy != nil && def.Strategy.Logs != nil {
|
||||||
|
for _, log := range def.Strategy.Logs {
|
||||||
|
logsMap[log.Name] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if def.Strategy != nil && def.Strategy.Metrics != nil {
|
||||||
|
for _, metric := range def.Strategy.Metrics {
|
||||||
|
metricsMap[metric.Name] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, log := range a.Logs {
|
||||||
|
if _, found := logsMap[log.Name]; !found {
|
||||||
|
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid log name: %s", log.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, metric := range a.Metrics {
|
||||||
|
if _, found := metricsMap[metric.Name]; !found {
|
||||||
|
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid metric name: %s", metric.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatableServiceConfigRes is the response for the UpdateServiceConfig API.
// TODO: find a better way to name this
type UpdatableServiceConfigRes struct {
	ServiceId string `json:"id"`
	Config    any    `json:"config"`
}

// UpdatableAccountConfigTyped is a generic struct for updating a cloud integration
// account config, used in the UpdateAccountConfig API.
type UpdatableAccountConfigTyped[AccountConfigT any] struct {
	Config *AccountConfigT `json:"config"`
}

type UpdatableAWSAccountConfig = UpdatableAccountConfigTyped[AWSAccountConfig]
type UpdatableAzureAccountConfig = UpdatableAccountConfigTyped[AzureAccountConfig]

// AWSAccountConfig is the configuration for an AWS cloud integration account.
type AWSAccountConfig struct {
	EnabledRegions []string `json:"regions"`
}

// AzureAccountConfig is the configuration for an Azure cloud integration account.
type AzureAccountConfig struct {
	DeploymentRegion      string   `json:"deployment_region,omitempty"`
	EnabledResourceGroups []string `json:"resource_groups,omitempty"`
}
|
||||||
|
|
||||||
|
// GettableServices is a generic struct for listing services of a cloud integration
// account, used in the ListServices API.
type GettableServices[ServiceSummaryT any] struct {
	Services []ServiceSummaryT `json:"services"`
}

type GettableAWSServices = GettableServices[AWSServiceSummary]
type GettableAzureServices = GettableServices[AzureServiceSummary]

// GetServiceDetailsReq is the request struct for getting service definition details.
type GetServiceDetailsReq struct {
	OrgID          valuer.UUID
	ServiceId      string
	CloudAccountID *string
}

// ServiceSummary is a generic struct for a service summary, used in the ListServices API.
type ServiceSummary[ServiceConfigT any] struct {
	DefinitionMetadata
	Config *ServiceConfigT `json:"config"`
}

type AWSServiceSummary = ServiceSummary[AWSServiceConfig]
type AzureServiceSummary = ServiceSummary[AzureServiceConfig]
|
||||||
|
|
||||||
|
// GettableServiceDetails is a generic struct for service details, used in the
// GetServiceDetails API.
type GettableServiceDetails[DefinitionT any, ServiceConfigT any] struct {
	// NOTE(review): stdlib encoding/json has no ",inline" option; with the standard
	// encoder this field would marshal under the "Definition" key rather than being
	// inlined — confirm which JSON library processes this tag.
	Definition       DefinitionT              `json:",inline"`
	Config           ServiceConfigT           `json:"config"`
	ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}

type GettableAWSServiceDetails = GettableServiceDetails[AWSDefinition, *AWSServiceConfig]
type GettableAzureServiceDetails = GettableServiceDetails[AzureDefinition, *AzureServiceConfig]

// Account represents a cloud integration account; used for business logic and API responses.
type Account struct {
	Id             string        `json:"id"`
	CloudAccountId string        `json:"cloud_account_id"`
	Config         any           `json:"config"` // AWSAccountConfig or AzureAccountConfig
	Status         AccountStatus `json:"status"`
}
|
||||||
|
|
||||||
|
// AccountStatus is the generic struct for cloud integration account status.
type AccountStatus struct {
	Integration AccountIntegrationStatus `json:"integration"`
}

// AccountIntegrationStatus stores heartbeat information from agent check-ins.
type AccountIntegrationStatus struct {
	LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}

// ServiceConnectionStatus represents the integration connection status for a particular
// service. It helps check ingested data and determines connection status by whether data
// was ingested or not. It is a composite struct for both metrics and logs.
type ServiceConnectionStatus struct {
	Logs    []*SignalConnectionStatus `json:"logs"`
	Metrics []*SignalConnectionStatus `json:"metrics"`
}

// SignalConnectionStatus represents the connection status for a particular signal type
// (logs or metrics) for a service. It is used in API responses so clients can show
// relevant information about the connection status.
type SignalConnectionStatus struct {
	CategoryID           string `json:"category"`
	CategoryDisplayName  string `json:"category_display_name"`
	LastReceivedTsMillis int64  `json:"last_received_ts_ms"` // epoch milliseconds
	LastReceivedFrom     string `json:"last_received_from"`  // resource identifier
}
|
||||||
|
|
||||||
|
// GettableCloudIntegrationConnectionParams is the response for the connection params API.
type GettableCloudIntegrationConnectionParams struct {
	IngestionUrl string `json:"ingestion_url,omitempty"`
	IngestionKey string `json:"ingestion_key,omitempty"`
	SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
	SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}

// GettableIngestionKey is a struct for an ingestion key returned from the gateway.
type GettableIngestionKey struct {
	Name  string `json:"name"`
	Value string `json:"value"`
	// other attributes from the gateway response are not included here since they are not being used.
}

// GettableIngestionKeysSearch is the response of the ingestion keys search API on the gateway.
type GettableIngestionKeysSearch struct {
	Status string                 `json:"status"`
	Data   []GettableIngestionKey `json:"data"`
	Error  string                 `json:"error"`
}

// GettableCreateIngestionKey is the response of the create ingestion key API on the gateway.
type GettableCreateIngestionKey struct {
	Status string               `json:"status"`
	Data   GettableIngestionKey `json:"data"`
	Error  string               `json:"error"`
}

// GettableDeployment is the response struct for deployment details fetched from Zeus.
type GettableDeployment struct {
	Name        string `json:"name"`
	ClusterInfo struct {
		Region struct {
			DNS string `json:"dns"`
		} `json:"region"`
	} `json:"cluster"`
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
// DATABASE TYPES
// --------------------------------------------------------------------------

// --------------------------------------------------------------------------
// Cloud integration uses the cloud_integration table
// and the cloud_integration_service table
// --------------------------------------------------------------------------
||||||
|
// CloudIntegration is the database model for a connected cloud provider account,
// stored in the cloud_integration table.
type CloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integration"`

	types.Identifiable
	types.TimeAuditable
	Provider        string       `json:"provider" bun:"provider,type:text,unique:provider_id"`
	Config          string       `json:"config" bun:"config,type:text"` // json serialized config
	AccountID       *string      `json:"account_id" bun:"account_id,type:text"`
	LastAgentReport *AgentReport `json:"last_agent_report" bun:"last_agent_report,type:text"`
	RemovedAt       *time.Time   `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
	OrgID           string       `bun:"org_id,type:text,unique:provider_id"`
}
|
||||||
|
|
||||||
|
func (a *CloudIntegration) Status() AccountStatus {
|
||||||
|
status := AccountStatus{}
|
||||||
|
if a.LastAgentReport != nil {
|
||||||
|
lastHeartbeat := a.LastAgentReport.TimestampMillis
|
||||||
|
status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
|
||||||
|
}
|
||||||
|
return status
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CloudIntegration) Account(cloudProvider CloudProviderType) *Account {
|
||||||
|
ca := &Account{Id: a.ID.StringValue(), Status: a.Status()}
|
||||||
|
|
||||||
|
if a.AccountID != nil {
|
||||||
|
ca.CloudAccountId = *a.AccountID
|
||||||
|
}
|
||||||
|
|
||||||
|
ca.Config = map[string]interface{}{}
|
||||||
|
|
||||||
|
if len(a.Config) < 1 {
|
||||||
|
return ca
|
||||||
|
}
|
||||||
|
|
||||||
|
switch cloudProvider {
|
||||||
|
case CloudProviderAWS:
|
||||||
|
config := new(AWSAccountConfig)
|
||||||
|
_ = UnmarshalJSON([]byte(a.Config), config)
|
||||||
|
ca.Config = config
|
||||||
|
case CloudProviderAzure:
|
||||||
|
config := new(AzureAccountConfig)
|
||||||
|
_ = UnmarshalJSON([]byte(a.Config), config)
|
||||||
|
ca.Config = config
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
return ca
|
||||||
|
}
|
||||||
|
|
||||||
|
type AgentReport struct {
|
||||||
|
TimestampMillis int64 `json:"timestamp_millis"`
|
||||||
|
Data map[string]any `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan scans data from db
|
||||||
|
func (r *AgentReport) Scan(src any) error {
|
||||||
|
var data []byte
|
||||||
|
switch v := src.(type) {
|
||||||
|
case []byte:
|
||||||
|
data = v
|
||||||
|
case string:
|
||||||
|
data = []byte(v)
|
||||||
|
default:
|
||||||
|
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Unmarshal(data, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value serializes data to bytes for db insertion
|
||||||
|
func (r *AgentReport) Value() (driver.Value, error) {
|
||||||
|
if r == nil {
|
||||||
|
return nil, errors.NewInternalf(errors.CodeInternal, "agent report is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
serialized, err := json.Marshal(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WrapInternalf(
|
||||||
|
err, errors.CodeInternal, "couldn't serialize agent report to JSON",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return serialized, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloudIntegrationService is the database model for a cloud integration service's
// configuration, stored in the cloud_integration_service table.
type CloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

	types.Identifiable
	types.TimeAuditable
	Type   string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
	Config string `bun:"config,type:text"` // json serialized config
	// NOTE(review): the FK references cloud_integrations(id) while the parent table is
	// declared above as cloud_integration — confirm the referenced table name is correct.
	CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
}
|
||||||
263
pkg/types/integrationtypes/cloudservicedefinitions.go
Normal file
263
pkg/types/integrationtypes/cloudservicedefinitions.go
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
package integrationtypes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// S3Sync is the AWS service type that syncs logs from S3 buckets.
	S3Sync = "s3sync"
)
|
||||||
|
|
||||||
|
// Definition is the generic interface for a cloud service definition.
// It is implemented by AWSDefinition and AzureDefinition, which represent service
// definitions for AWS and Azure respectively.
// Generics work well so far because service definitions share similar logic.
// We don't want to over-do generics: if the service definitions functionally diverge
// in the future, consider breaking the generics.
type Definition interface {
	GetId() string
	Validate() error
	PopulateDashboardURLs(cloudProvider CloudProviderType, svcId string)
	GetIngestionStatusCheck() *IngestionStatusCheck
	GetAssets() Assets
}

// AWSDefinition represents an AWS service definition, which includes the collection
// strategy, dashboards, and meta info for the integration.
type AWSDefinition = ServiceDefinition[AWSCollectionStrategy]

// AzureDefinition represents an Azure service definition, which includes the collection
// strategy, dashboards, and meta info for the integration.
type AzureDefinition = ServiceDefinition[AzureCollectionStrategy]

// Compile-time checks that AWSDefinition and AzureDefinition satisfy Definition,
// so they can be used in a generic way.
var _ Definition = &AWSDefinition{}
var _ Definition = &AzureDefinition{}
|
||||||
|
|
||||||
|
// ServiceDefinition represents generic struct for cloud service, regardless of the cloud provider.
|
||||||
|
// this struct must satify Definition interface.
|
||||||
|
// StrategyT is of either AWSCollectionStrategy or AzureCollectionStrategy, depending on the cloud provider.
|
||||||
|
type ServiceDefinition[StrategyT any] struct {
|
||||||
|
DefinitionMetadata
|
||||||
|
Overview string `json:"overview"` // markdown
|
||||||
|
Assets Assets `json:"assets"`
|
||||||
|
SupportedSignals SupportedSignals `json:"supported_signals"`
|
||||||
|
DataCollected DataCollected `json:"data_collected"`
|
||||||
|
IngestionStatusCheck *IngestionStatusCheck `json:"ingestion_status_check,omitempty"`
|
||||||
|
Strategy *StrategyT `json:"telemetry_collection_strategy"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Following methods are quite self explanatory, they are just to satisfy the Definition interface and provide some utility functions for service definitions.
|
||||||
|
func (def *ServiceDefinition[StrategyT]) GetId() string {
|
||||||
|
return def.Id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (def *ServiceDefinition[StrategyT]) Validate() error {
|
||||||
|
seenDashboardIds := map[string]interface{}{}
|
||||||
|
|
||||||
|
if def.Strategy == nil {
|
||||||
|
return errors.NewInternalf(errors.CodeInternal, "telemetry_collection_strategy is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dd := range def.Assets.Dashboards {
|
||||||
|
if _, seen := seenDashboardIds[dd.Id]; seen {
|
||||||
|
return errors.NewInternalf(errors.CodeInternal, "multiple dashboards found with id %s", dd.Id)
|
||||||
|
}
|
||||||
|
seenDashboardIds[dd.Id] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (def *ServiceDefinition[StrategyT]) PopulateDashboardURLs(cloudProvider CloudProviderType, svcId string) {
|
||||||
|
for i := range def.Assets.Dashboards {
|
||||||
|
dashboardId := def.Assets.Dashboards[i].Id
|
||||||
|
url := "/dashboard/" + GetCloudIntegrationDashboardID(cloudProvider, svcId, dashboardId)
|
||||||
|
def.Assets.Dashboards[i].Url = url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (def *ServiceDefinition[StrategyT]) GetIngestionStatusCheck() *IngestionStatusCheck {
|
||||||
|
return def.IngestionStatusCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
func (def *ServiceDefinition[StrategyT]) GetAssets() Assets {
|
||||||
|
return def.Assets
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefinitionMetadata represents service definition metadata, useful for showing a service overview.
type DefinitionMetadata struct {
	Id    string `json:"id"`
	Title string `json:"title"`
	Icon  string `json:"icon"`
}

// IngestionStatusCheckCategory represents a category of ingestion status check; it applies
// to both metrics and logs. A category can be "Overview" or "Enhanced" metrics for AWS,
// and "Transaction" or "Capacity" metrics for Azure. Each category can have multiple
// checks (AND logic); if all checks pass, we can be sure that data is being ingested for
// that category of the signal.
type IngestionStatusCheckCategory struct {
	Category    string                           `json:"category"`
	DisplayName string                           `json:"display_name"`
	Checks      []*IngestionStatusCheckAttribute `json:"checks"`
}

// IngestionStatusCheckAttribute represents a check or condition for ingestion status.
// Key can be a metric name or part of a log message.
type IngestionStatusCheckAttribute struct {
	Key        string                                 `json:"key"` // OPTIONAL search key (metric name or log message)
	Attributes []*IngestionStatusCheckAttributeFilter `json:"attributes"`
}

// IngestionStatusCheck represents the combined checks for metrics and logs for a service.
type IngestionStatusCheck struct {
	Metrics []*IngestionStatusCheckCategory `json:"metrics"`
	Logs    []*IngestionStatusCheckCategory `json:"logs"`
}

// IngestionStatusCheckAttributeFilter represents a filter for a check, which can be used
// to match specific log messages or metrics with specific attributes — for example, logs
// with a specific log level or metrics with specific dimensions.
type IngestionStatusCheckAttributeFilter struct {
	Name     string `json:"name"`
	Operator string `json:"operator"`
	Value    string `json:"value"` // OPTIONAL
}

// Assets represents the collection of dashboards shipped with a service definition.
type Assets struct {
	Dashboards []Dashboard `json:"dashboards"`
}

// SupportedSignals lists the signals supported for a cloud provider's service.
type SupportedSignals struct {
	Logs    bool `json:"logs"`
	Metrics bool `json:"metrics"`
}

// DataCollected is a curated static list of metrics and logs; shown as part of the service overview.
type DataCollected struct {
	Logs    []CollectedLogAttribute `json:"logs"`
	Metrics []CollectedMetric       `json:"metrics"`
}
|
||||||
|
|
||||||
|
// CollectedLogAttribute represents a log attribute that is present in all log entries for
// a service; shown as part of the service overview.
type CollectedLogAttribute struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Type string `json:"type"`
}

// CollectedMetric represents a metric that is collected for a service; shown as part of
// the service overview.
type CollectedMetric struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	Unit        string `json:"unit"`
	Description string `json:"description"`
}

// AWSCollectionStrategy represents the signal collection strategy for AWS services.
// This is AWS specific.
type AWSCollectionStrategy struct {
	Metrics   *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
	Logs      *AWSLogsStrategy    `json:"aws_logs,omitempty"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type in AWS
}

// AzureCollectionStrategy represents the signal collection strategy for Azure services.
// This is Azure specific.
type AzureCollectionStrategy struct {
	Metrics []*AzureMetricsStrategy `json:"azure_metrics,omitempty"`
	Logs    []*AzureLogsStrategy    `json:"azure_logs,omitempty"`
}
|
||||||
|
|
||||||
|
// AWSMetricsStrategy represents the metrics collection strategy for AWS services.
// This is AWS specific.
type AWSMetricsStrategy struct {
	// StreamFilters is to be used as
	// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
	StreamFilters []struct {
		// json tags here are in the shape expected by the AWS API as detailed at
		// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
		Namespace   string   `json:"Namespace"`
		MetricNames []string `json:"MetricNames,omitempty"`
	} `json:"cloudwatch_metric_stream_filters"`
}

// AWSLogsStrategy represents the logs collection strategy for AWS services.
// This is AWS specific.
type AWSLogsStrategy struct {
	Subscriptions []struct {
		// LogGroupNamePrefix subscribes to all log groups with the specified prefix,
		// e.g. `/aws/rds/`.
		LogGroupNamePrefix string `json:"log_group_name_prefix"`

		// FilterPattern follows
		// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
		// "" implies no filtering is required.
		FilterPattern string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
}

// AzureMetricsStrategy represents the metrics collection strategy for Azure services.
// This is Azure specific.
type AzureMetricsStrategy struct {
	CategoryType string `json:"category_type"`
	Name         string `json:"name"`
}

// AzureLogsStrategy represents the logs collection strategy for Azure services.
// This is Azure specific. Even though it is similar to AzureMetricsStrategy, it is kept
// separate for future flexibility and clarity.
type AzureLogsStrategy struct {
	CategoryType string `json:"category_type"`
	Name         string `json:"name"`
}
|
||||||
|
|
||||||
|
// Dashboard represents a dashboard definition for a cloud integration.
type Dashboard struct {
	Id          string                                `json:"id"`
	Url         string                                `json:"url"`
	Title       string                                `json:"title"`
	Description string                                `json:"description"`
	Image       string                                `json:"image"`
	Definition  *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}
|
||||||
|
|
||||||
|
// UTILS
|
||||||
|
|
||||||
|
// GetCloudIntegrationDashboardID returns the dashboard id for a cloud integration, given
// the cloud provider, service id, and dashboard id. The generated id is unique per
// (provider, service, dashboard) and can also be parsed to recover the cloud provider
// and service id when needed.
func GetCloudIntegrationDashboardID(cloudProvider valuer.String, svcId, dashboardId string) string {
	return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
|
||||||
|
|
||||||
|
// GetDashboardsFromAssets returns the list of dashboards for the cloud provider service
// from the definition's assets, converting each asset dashboard into a locked,
// integration-authored dashboardtypes.Dashboard.
func GetDashboardsFromAssets(
	svcId string,
	orgID valuer.UUID,
	cloudProvider CloudProviderType,
	createdAt *time.Time,
	assets Assets,
) []*dashboardtypes.Dashboard {
	dashboards := make([]*dashboardtypes.Dashboard, 0)

	for _, d := range assets.Dashboards {
		// Author string marks dashboards created by this integration, e.g. "aws-integration".
		author := fmt.Sprintf("%s-integration", cloudProvider)
		dashboards = append(dashboards, &dashboardtypes.Dashboard{
			ID:     GetCloudIntegrationDashboardID(cloudProvider, svcId, d.Id),
			Locked: true, // integration dashboards are not user-editable
			OrgID:  orgID,
			// NOTE(review): d.Definition and createdAt are dereferenced without nil checks;
			// callers must guarantee both are non-nil or this will panic — confirm at call sites.
			Data: *d.Definition,
			TimeAuditable: types.TimeAuditable{
				CreatedAt: *createdAt,
				UpdatedAt: *createdAt,
			},
			UserAuditable: types.UserAuditable{
				CreatedBy: author,
				UpdatedBy: author,
			},
		})
	}

	return dashboards
}
|
||||||
103
pkg/types/integrationtypes/constants.go
Normal file
103
pkg/types/integrationtypes/constants.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
package integrationtypes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error codes for cloud integration region/provider validation.
var (
	// CodeInvalidCloudRegion: the region string is not in the provider's
	// valid-region list below.
	CodeInvalidCloudRegion = errors.MustNewCode("invalid_cloud_region")
	// CodeMismatchCloudProvider: a cloud provider did not match the one
	// expected by the operation.
	CodeMismatchCloudProvider = errors.MustNewCode("cloud_provider_mismatch")
)
|
||||||
|
|
||||||
|
// List of all valid cloud regions on Amazon Web Services.
// Maintained by hand — may lag behind newly launched AWS regions; update
// alongside new region launches. Membership test: ValidAWSRegions[region].
var ValidAWSRegions = map[string]bool{
	"af-south-1":     true, // Africa (Cape Town).
	"ap-east-1":      true, // Asia Pacific (Hong Kong).
	"ap-northeast-1": true, // Asia Pacific (Tokyo).
	"ap-northeast-2": true, // Asia Pacific (Seoul).
	"ap-northeast-3": true, // Asia Pacific (Osaka).
	"ap-south-1":     true, // Asia Pacific (Mumbai).
	"ap-south-2":     true, // Asia Pacific (Hyderabad).
	"ap-southeast-1": true, // Asia Pacific (Singapore).
	"ap-southeast-2": true, // Asia Pacific (Sydney).
	"ap-southeast-3": true, // Asia Pacific (Jakarta).
	"ap-southeast-4": true, // Asia Pacific (Melbourne).
	"ca-central-1":   true, // Canada (Central).
	"ca-west-1":      true, // Canada West (Calgary).
	"eu-central-1":   true, // Europe (Frankfurt).
	"eu-central-2":   true, // Europe (Zurich).
	"eu-north-1":     true, // Europe (Stockholm).
	"eu-south-1":     true, // Europe (Milan).
	"eu-south-2":     true, // Europe (Spain).
	"eu-west-1":      true, // Europe (Ireland).
	"eu-west-2":      true, // Europe (London).
	"eu-west-3":      true, // Europe (Paris).
	"il-central-1":   true, // Israel (Tel Aviv).
	"me-central-1":   true, // Middle East (UAE).
	"me-south-1":     true, // Middle East (Bahrain).
	"sa-east-1":      true, // South America (Sao Paulo).
	"us-east-1":      true, // US East (N. Virginia).
	"us-east-2":      true, // US East (Ohio).
	"us-west-1":      true, // US West (N. California).
	"us-west-2":      true, // US West (Oregon).
}
|
||||||
|
|
||||||
|
// List of all valid cloud regions for Microsoft Azure.
// Maintained by hand — may lag behind newly launched Azure regions; update
// alongside new region launches. Membership test: ValidAzureRegions[region].
var ValidAzureRegions = map[string]bool{
	"australiacentral":   true, // Australia Central
	"australiacentral2":  true, // Australia Central 2
	"australiaeast":      true, // Australia East
	"australiasoutheast": true, // Australia Southeast
	"austriaeast":        true, // Austria East
	"belgiumcentral":     true, // Belgium Central
	"brazilsouth":        true, // Brazil South
	"brazilsoutheast":    true, // Brazil Southeast
	"canadacentral":      true, // Canada Central
	"canadaeast":         true, // Canada East
	"centralindia":       true, // Central India
	"centralus":          true, // Central US
	"chilecentral":       true, // Chile Central
	"denmarkeast":        true, // Denmark East
	"eastasia":           true, // East Asia
	"eastus":             true, // East US
	"eastus2":            true, // East US 2
	"francecentral":      true, // France Central
	"francesouth":        true, // France South
	"germanynorth":       true, // Germany North
	"germanywestcentral": true, // Germany West Central
	"indonesiacentral":   true, // Indonesia Central
	"israelcentral":      true, // Israel Central
	"italynorth":         true, // Italy North
	"japaneast":          true, // Japan East
	"japanwest":          true, // Japan West
	"koreacentral":       true, // Korea Central
	"koreasouth":         true, // Korea South
	"malaysiawest":       true, // Malaysia West
	"mexicocentral":      true, // Mexico Central
	"newzealandnorth":    true, // New Zealand North
	"northcentralus":     true, // North Central US
	"northeurope":        true, // North Europe
	"norwayeast":         true, // Norway East
	"norwaywest":         true, // Norway West
	"polandcentral":      true, // Poland Central
	"qatarcentral":       true, // Qatar Central
	"southafricanorth":   true, // South Africa North
	"southafricawest":    true, // South Africa West
	"southcentralus":     true, // South Central US
	"southindia":         true, // South India
	"southeastasia":      true, // Southeast Asia
	"spaincentral":       true, // Spain Central
	"swedencentral":      true, // Sweden Central
	"switzerlandnorth":   true, // Switzerland North
	"switzerlandwest":    true, // Switzerland West
	"uaecentral":         true, // UAE Central
	"uaenorth":           true, // UAE North
	"uksouth":            true, // UK South
	"ukwest":             true, // UK West
	"westcentralus":      true, // West Central US
	"westeurope":         true, // West Europe
	"westindia":          true, // West India
	"westus":             true, // West US
	"westus2":            true, // West US 2
	"westus3":            true, // West US 3
}
|
||||||
114
pkg/types/integrationtypes/integration.go
Normal file
114
pkg/types/integrationtypes/integration.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
package integrationtypes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"encoding/json"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
"github.com/SigNoz/signoz/pkg/types"
|
||||||
|
"github.com/SigNoz/signoz/pkg/valuer"
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CloudProviderType is a type alias for valuer.String identifying a
// supported cloud provider ("aws" or "azure").
type CloudProviderType = valuer.String

// The set of supported cloud providers; NewCloudProvider validates against
// these values.
var (
	CloudProviderAWS   = valuer.NewString("aws")
	CloudProviderAzure = valuer.NewString("azure")
)

var (
	// CodeCloudProviderInvalidInput is returned when a provider string, or
	// a cloud-integration dashboard id carrying one, fails validation.
	CodeCloudProviderInvalidInput = errors.MustNewCode("invalid_cloud_provider")
)
|
||||||
|
|
||||||
|
// NewCloudProvider returns a new CloudProviderType from a string. It validates the input and returns an error if the input is not valid.
|
||||||
|
func NewCloudProvider(provider string) (CloudProviderType, error) {
|
||||||
|
switch provider {
|
||||||
|
case CloudProviderAWS.String(), CloudProviderAzure.String():
|
||||||
|
return valuer.NewString(provider), nil
|
||||||
|
default:
|
||||||
|
return CloudProviderType{}, errors.NewInvalidInputf(CodeCloudProviderInvalidInput, "invalid cloud provider: %s", provider)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Synthetic user emails under which cloud one-click integrations act.
var (
	AWSIntegrationUserEmail   = valuer.MustNewEmail("aws-integration@signoz.io")
	AzureIntegrationUserEmail = valuer.MustNewEmail("azure-integration@signoz.io")
)

// CloudIntegrationUserEmails is the list of valid emails for Cloud One Click integrations.
// This is used for validation and restrictions in different contexts, across codebase.
var CloudIntegrationUserEmails = []valuer.Email{
	AWSIntegrationUserEmail,
	AzureIntegrationUserEmail,
}
|
||||||
|
|
||||||
|
// IsCloudIntegrationDashboardUuid reports whether dashboardUuid follows the
// "cloud-integration--<provider>--<service>--<dashboard>" shape produced by
// GetCloudIntegrationDashboardID.
func IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
	// A valid id splits into exactly four segments on "--" and starts with
	// the fixed "cloud-integration" marker.
	segments := strings.SplitN(dashboardUuid, "--", 4)
	return len(segments) == 4 && segments[0] == "cloud-integration"
}
|
||||||
|
|
||||||
|
// GetCloudIntegrationDashboardID returns the cloud provider from dashboard id, if it's a cloud integration dashboard id.
|
||||||
|
// throws an error if invalid format or invalid cloud provider is provided in the dashboard id.
|
||||||
|
func GetCloudProviderFromDashboardID(dashboardUuid string) (CloudProviderType, error) {
|
||||||
|
parts := strings.SplitN(dashboardUuid, "--", 4)
|
||||||
|
if len(parts) != 4 {
|
||||||
|
return valuer.String{}, errors.NewInvalidInputf(CodeCloudProviderInvalidInput, "invalid dashboard uuid: %s", dashboardUuid)
|
||||||
|
}
|
||||||
|
|
||||||
|
providerStr := parts[1]
|
||||||
|
|
||||||
|
cloudProvider, err := NewCloudProvider(providerStr)
|
||||||
|
if err != nil {
|
||||||
|
return CloudProviderType{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return cloudProvider, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
// Normal integration uses just the installed_integration table
// --------------------------------------------------------------------------

// InstalledIntegration is the bun model for one row of the
// installed_integration table. (Type, OrgID) is unique per organization,
// and rows are removed when the owning organization is deleted (cascade).
type InstalledIntegration struct {
	bun.BaseModel `bun:"table:installed_integration"`

	types.Identifiable
	Type        string                     `json:"type" bun:"type,type:text,unique:org_id_type"`
	Config      InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
	InstalledAt time.Time                  `json:"installed_at" bun:"installed_at,default:current_timestamp"`
	OrgID       string                     `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
}

// InstalledIntegrationConfig is the free-form integration configuration,
// persisted as JSON text via the Scan/Value implementations below.
type InstalledIntegrationConfig map[string]interface{}
|
||||||
|
|
||||||
|
// Scan scans data from db
|
||||||
|
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
|
||||||
|
var data []byte
|
||||||
|
switch v := src.(type) {
|
||||||
|
case []byte:
|
||||||
|
data = v
|
||||||
|
case string:
|
||||||
|
data = []byte(v)
|
||||||
|
default:
|
||||||
|
return errors.NewInternalf(errors.CodeInternal, "tried to scan from %T instead of string or bytes", src)
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Unmarshal(data, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value serializes data to db
|
||||||
|
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
|
||||||
|
filterSetJson, err := json.Marshal(c)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not serialize integration config to JSON")
|
||||||
|
}
|
||||||
|
return filterSetJson, nil
|
||||||
|
}
|
||||||
21
pkg/types/integrationtypes/store.go
Normal file
21
pkg/types/integrationtypes/store.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package integrationtypes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrCodeCloudIntegrationAccountNotFound identifies a missing cloud
	// integration account; presumably returned by the Store lookups below
	// — confirm against the implementation.
	ErrCodeCloudIntegrationAccountNotFound errors.Code = errors.MustNewCode("cloud_integration_account_not_found")
)

// Store defines the interface for cloud integration accounts persistence.
type Store interface {
	// ListConnected returns the connected accounts for an org and provider.
	ListConnected(ctx context.Context, orgId string, provider string) ([]CloudIntegration, error)
	// Get returns the account with the given id for an org and provider.
	Get(ctx context.Context, orgId string, provider string, id string) (*CloudIntegration, error)
	// GetConnectedCloudAccount returns the connected account matching the
	// cloud-side account id for an org and provider.
	GetConnectedCloudAccount(ctx context.Context, orgId, provider string, accountID string) (*CloudIntegration, error)
	// Upsert inserts an account or updates it by (cloudProvider, id) for specified non-empty fields.
	Upsert(ctx context.Context, orgId string, provider string, id *string, config []byte, accountId *string, agentReport *AgentReport, removedAt *time.Time) (*CloudIntegration, error)
}
|
||||||
@@ -2,6 +2,7 @@ package querybuildertypesv5
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
|
||||||
"github.com/SigNoz/signoz/pkg/errors"
|
"github.com/SigNoz/signoz/pkg/errors"
|
||||||
@@ -59,3 +60,16 @@ type TraceOperatorStatementBuilder interface {
|
|||||||
// Build builds the trace operator query.
|
// Build builds the trace operator query.
|
||||||
Build(ctx context.Context, start, end uint64, requestType RequestType, query QueryBuilderTraceOperator, compositeQuery *CompositeQuery) (*Statement, error)
|
Build(ctx context.Context, start, end uint64, requestType RequestType, query QueryBuilderTraceOperator, compositeQuery *CompositeQuery) (*Statement, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// KeyEvolutionMetadataKey records how a key's backing column evolved:
// the original column and type, the column and type that replaced it,
// and when the replacement was released.
type KeyEvolutionMetadataKey struct {
	BaseColumn     string    // original column name
	BaseColumnType string    // original column type
	NewColumn      string    // replacement column name
	NewColumnType  string    // replacement column type
	ReleaseTime    time.Time // when the replacement column took effect
}
|
||||||
|
|
||||||
|
// KeyEvolutionMetadataStore stores column-evolution metadata entries,
// keyed by key name.
type KeyEvolutionMetadataStore interface {
	// Get returns the evolution entries recorded for keyName.
	Get(keyName string) []*KeyEvolutionMetadataKey
	// Add records an evolution entry for keyName.
	Add(keyName string, key *KeyEvolutionMetadataKey)
}
|
||||||
|
|||||||
@@ -355,6 +355,10 @@ func (r *PostableRule) validate() error {
|
|||||||
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
|
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "composite query is required"))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if r.Version != "v5" {
|
||||||
|
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "only version v5 is supported, got %q", r.Version))
|
||||||
|
}
|
||||||
|
|
||||||
if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
|
if isAllQueriesDisabled(r.RuleCondition.CompositeQuery) {
|
||||||
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
|
errs = append(errs, signozError.NewInvalidInputf(signozError.CodeInvalidInput, "all queries are disabled in rule condition"))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -108,6 +108,7 @@ func TestParseIntoRule(t *testing.T) {
|
|||||||
"ruleType": "threshold_rule",
|
"ruleType": "threshold_rule",
|
||||||
"evalWindow": "5m",
|
"evalWindow": "5m",
|
||||||
"frequency": "1m",
|
"frequency": "1m",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -150,6 +151,7 @@ func TestParseIntoRule(t *testing.T) {
|
|||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "DefaultsRule",
|
"alert": "DefaultsRule",
|
||||||
"ruleType": "threshold_rule",
|
"ruleType": "threshold_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -187,6 +189,7 @@ func TestParseIntoRule(t *testing.T) {
|
|||||||
initRule: PostableRule{},
|
initRule: PostableRule{},
|
||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "PromQLRule",
|
"alert": "PromQLRule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "promql",
|
"queryType": "promql",
|
||||||
@@ -256,6 +259,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
|
|||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "SeverityLabelTest",
|
"alert": "SeverityLabelTest",
|
||||||
"schemaVersion": "v1",
|
"schemaVersion": "v1",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -344,6 +348,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
|
|||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "NoLabelsTest",
|
"alert": "NoLabelsTest",
|
||||||
"schemaVersion": "v1",
|
"schemaVersion": "v1",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -384,6 +389,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
|
|||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "OverwriteTest",
|
"alert": "OverwriteTest",
|
||||||
"schemaVersion": "v1",
|
"schemaVersion": "v1",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -474,6 +480,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
|
|||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "V2Test",
|
"alert": "V2Test",
|
||||||
"schemaVersion": "v2",
|
"schemaVersion": "v2",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -517,6 +524,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
|
|||||||
initRule: PostableRule{},
|
initRule: PostableRule{},
|
||||||
content: []byte(`{
|
content: []byte(`{
|
||||||
"alert": "DefaultSchemaTest",
|
"alert": "DefaultSchemaTest",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -569,6 +577,7 @@ func TestParseIntoRuleSchemaVersioning(t *testing.T) {
|
|||||||
func TestParseIntoRuleThresholdGeneration(t *testing.T) {
|
func TestParseIntoRuleThresholdGeneration(t *testing.T) {
|
||||||
content := []byte(`{
|
content := []byte(`{
|
||||||
"alert": "TestThresholds",
|
"alert": "TestThresholds",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -639,6 +648,7 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
|
|||||||
"schemaVersion": "v2",
|
"schemaVersion": "v2",
|
||||||
"alert": "MultiThresholdAlert",
|
"alert": "MultiThresholdAlert",
|
||||||
"ruleType": "threshold_rule",
|
"ruleType": "threshold_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -732,6 +742,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyBelowTest",
|
"alert": "AnomalyBelowTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -766,6 +777,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyBelowTest",
|
"alert": "AnomalyBelowTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -799,6 +811,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyAboveTest",
|
"alert": "AnomalyAboveTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -833,6 +846,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyAboveTest",
|
"alert": "AnomalyAboveTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -866,6 +880,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyBelowAllTest",
|
"alert": "AnomalyBelowAllTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -901,6 +916,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyBelowAllTest",
|
"alert": "AnomalyBelowAllTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -935,6 +951,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "AnomalyOutOfBoundsTest",
|
"alert": "AnomalyOutOfBoundsTest",
|
||||||
"ruleType": "anomaly_rule",
|
"ruleType": "anomaly_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -969,6 +986,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "ThresholdTest",
|
"alert": "ThresholdTest",
|
||||||
"ruleType": "threshold_rule",
|
"ruleType": "threshold_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
@@ -1003,6 +1021,7 @@ func TestAnomalyNegationEval(t *testing.T) {
|
|||||||
ruleJSON: []byte(`{
|
ruleJSON: []byte(`{
|
||||||
"alert": "ThresholdTest",
|
"alert": "ThresholdTest",
|
||||||
"ruleType": "threshold_rule",
|
"ruleType": "threshold_rule",
|
||||||
|
"version": "v5",
|
||||||
"condition": {
|
"condition": {
|
||||||
"compositeQuery": {
|
"compositeQuery": {
|
||||||
"queryType": "builder",
|
"queryType": "builder",
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ type MetadataStore interface {
|
|||||||
ListLogsJSONIndexes(ctx context.Context, filters ...string) (map[string][]schemamigrator.Index, error)
|
ListLogsJSONIndexes(ctx context.Context, filters ...string) (map[string][]schemamigrator.Index, error)
|
||||||
|
|
||||||
// ListPromotedPaths lists the promoted paths.
|
// ListPromotedPaths lists the promoted paths.
|
||||||
ListPromotedPaths(ctx context.Context, paths ...string) (map[string]struct{}, error)
|
GetPromotedPaths(ctx context.Context, paths ...string) (map[string]bool, error)
|
||||||
|
|
||||||
// PromotePaths promotes the paths.
|
// PromotePaths promotes the paths.
|
||||||
PromotePaths(ctx context.Context, paths ...string) error
|
PromotePaths(ctx context.Context, paths ...string) error
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ type MockMetadataStore struct {
|
|||||||
RelatedValuesMap map[string][]string
|
RelatedValuesMap map[string][]string
|
||||||
AllValuesMap map[string]*telemetrytypes.TelemetryFieldValues
|
AllValuesMap map[string]*telemetrytypes.TelemetryFieldValues
|
||||||
TemporalityMap map[string]metrictypes.Temporality
|
TemporalityMap map[string]metrictypes.Temporality
|
||||||
PromotedPathsMap map[string]struct{}
|
PromotedPathsMap map[string]bool
|
||||||
LogsJSONIndexesMap map[string][]schemamigrator.Index
|
LogsJSONIndexesMap map[string][]schemamigrator.Index
|
||||||
LookupKeysMap map[telemetrytypes.MetricMetadataLookupKey]int64
|
LookupKeysMap map[telemetrytypes.MetricMetadataLookupKey]int64
|
||||||
}
|
}
|
||||||
@@ -28,7 +28,7 @@ func NewMockMetadataStore() *MockMetadataStore {
|
|||||||
RelatedValuesMap: make(map[string][]string),
|
RelatedValuesMap: make(map[string][]string),
|
||||||
AllValuesMap: make(map[string]*telemetrytypes.TelemetryFieldValues),
|
AllValuesMap: make(map[string]*telemetrytypes.TelemetryFieldValues),
|
||||||
TemporalityMap: make(map[string]metrictypes.Temporality),
|
TemporalityMap: make(map[string]metrictypes.Temporality),
|
||||||
PromotedPathsMap: make(map[string]struct{}),
|
PromotedPathsMap: make(map[string]bool),
|
||||||
LogsJSONIndexesMap: make(map[string][]schemamigrator.Index),
|
LogsJSONIndexesMap: make(map[string][]schemamigrator.Index),
|
||||||
LookupKeysMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
LookupKeysMap: make(map[telemetrytypes.MetricMetadataLookupKey]int64),
|
||||||
}
|
}
|
||||||
@@ -295,13 +295,13 @@ func (m *MockMetadataStore) SetTemporality(metricName string, temporality metric
|
|||||||
// PromotePaths promotes the paths.
// Mock implementation: marks each path as promoted in the in-memory map
// and never fails.
func (m *MockMetadataStore) PromotePaths(ctx context.Context, paths ...string) error {
	for _, path := range paths {
		m.PromotedPathsMap[path] = true
	}
	return nil
}
|
||||||
|
|
||||||
// GetPromotedPaths returns the promoted paths.
// Mock implementation: the paths filter argument is ignored and the whole
// in-memory promoted-path map is returned.
func (m *MockMetadataStore) GetPromotedPaths(ctx context.Context, paths ...string) (map[string]bool, error) {
	return m.PromotedPathsMap, nil
}
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user