Compare commits

..

57 Commits

Author SHA1 Message Date
swapnil-signoz 09b3437f58 Merge branch 'feat/cloud-integrations-apis' into feat/azure-integration 2026-02-23 20:14:45 +05:30
swapnil-signoz 5a762c678e Merge branch 'main' into feat/cloud-integrations-apis 2026-02-23 20:11:36 +05:30
swapnil-signoz fe18b92662 Merge branch 'main' into feat/azure-integration 2026-02-23 20:11:26 +05:30
swapnil-signoz cbbbe6a0ed refactor: update generic type parameters in cloud provider and integration types 2026-02-23 17:18:42 +05:30
swapnil-signoz 20f63765dd refactor: updating agent version 2026-02-23 16:31:53 +05:30
swapnil-signoz ce9869ec12 feat: moving methods to generic base provider 2026-02-23 16:24:10 +05:30
swapnil-signoz e23b1b0abc Merge branch 'feat/cloud-integrations-apis' into feat/azure-integration 2026-02-23 10:46:51 +05:30
swapnil-signoz 55b1311f78 Merge branch 'main' into feat/cloud-integrations-apis 2026-02-23 10:45:09 +05:30
swapnil-signoz e432db7300 Merge branch 'main' into feat/azure-integration 2026-02-22 23:46:11 +05:30
swapnil-signoz 3583176d26 feat: adding base provider 2026-02-22 23:32:47 +05:30
Swapnil Nakade 55198b82fb Merge branch 'feat/cloud-integrations-apis' into feat/azure-integration 2026-02-20 15:49:37 +05:30
Swapnil Nakade e985d9c843 chore: merge back 2026-02-20 15:32:46 +05:30
Swapnil Nakade a95091d145 feat: add ingestion status check for metrics and logs in Azure integration 2026-02-20 15:16:41 +05:30
swapnil-signoz 59668698a2 refactor: using QBv5 for connection status (#10288) 2026-02-19 16:19:38 +05:30
    * refactor: updating azure cloud integrations api
    * feat: extending cloud integration apis
    * refactor: updating cloud integration apis
    * refactor: updating cloud-integration controller
    * refactor: updating cloud provider type
    * refactor: updating service details api
    * refactor: code beautification
    * refactor: sorting services list for consistency
    * refactor: cloud integration API cleanup
    * refactor: reverting
    * ci: fixing lint ci issues
    * feat: adding logs
    * fix: aws connection url generation
    * refactor: removing comment
    * chore: wip
    * refactor: wip
    * refactor: updating response for conn status
    * refactor: removing wrong deps
    * feat: adding comments
    * feat: adding default values for azure service config
    * feat: updating service definitions
    * refactor: updating azure telemetry collections strat
    * refactor: aws cloud integration provider impl
    * refactor: improving aws cloud integration apis structure
    * feat: adding logger in opts
    * feat: updating cloud integration aws api
    * refactor: using QBv5 for connection status
    * refactor: status check
    * fix: handle unexpected query response types for service metrics and logs
    * refactor: implement centralized panic recovery
    * refactor: simplify AWS cloud provider initialization by removing unused reader parameter
Swapnil Nakade 961217c51f feat: implement service connection status retrieval for Azure integration 2026-02-18 15:16:13 +05:30
swapnil-signoz 22ed687d44 Refactor/aws cloudintegration (#10339) 2026-02-18 12:09:35 +05:30
    * refactor: updating azure cloud integrations api
    * feat: extending cloud integration apis
    * refactor: updating cloud integration apis
    * refactor: updating cloud-integration controller
    * refactor: updating cloud provider type
    * refactor: updating service details api
    * refactor: code beautification
    * refactor: sorting services list for consistency
    * refactor: cloud integration API cleanup
    * refactor: reverting
    * ci: fixing lint ci issues
    * feat: adding logs
    * fix: aws connection url generation
    * refactor: removing comment
    * chore: wip
    * refactor: wip
    * refactor: updating response for conn status
    * refactor: removing wrong deps
    * feat: adding comments
    * feat: adding default values for azure service config
    * feat: updating service definitions
    * refactor: updating azure telemetry collections strat
    * refactor: aws cloud integration provider impl
    * refactor: improving aws cloud integration apis structure
    * feat: adding logger in opts
    * feat: updating cloud integration aws api
    * feat: using generics
    * fix: update error handling in cloud integration for deployment info response
    * refactor: optimize dashboard retrieval logic in AWS provider
    * refactor: update GetDashboardsFromAssets to include orgID parameter
swapnil-signoz 1da016cf1a Feat: improving cloud integrations APIs (#10280) 2026-02-18 11:43:26 +05:30
    * refactor: updating azure cloud integrations api
    * feat: extending cloud integration apis
    * refactor: updating cloud integration apis
    * refactor: updating cloud-integration controller
    * refactor: updating cloud provider type
    * refactor: updating service details api
    * refactor: code beautification
    * refactor: sorting services list for consistency
    * refactor: cloud integration API cleanup
    * refactor: reverting
    * ci: fixing lint ci issues
    * feat: adding logs
    * fix: aws connection url generation
    * refactor: removing comment
    * chore: wip
    * refactor: wip
    * refactor: updating response for conn status
    * refactor: removing wrong deps
    * feat: adding comments
    * feat: adding default values for azure service config
    * feat: updating service definitions
    * refactor: updating azure telemetry collections strat
    * refactor: aws cloud integration provider impl
    * refactor: improving aws cloud integration apis structure
    * feat: adding logger in opts
    * feat: updating cloud integration aws api
    * feat: using generics
Swapnil Nakade 7653be06e6 refactor: removing dummy dashboards 2026-02-13 18:54:54 +05:30
Swapnil Nakade 9d53ee5053 feat: adding more generic struct 2026-02-13 18:49:31 +05:30
Piyush Singariya c62b4d9141 test: testing generics 2026-02-13 16:06:57 +05:30
Swapnil Nakade 5720fcb654 feat: adding azure cloud integration apis 2026-02-12 23:49:43 +05:30
Swapnil Nakade d32911b0fd Merge branch 'main' into refactor/aws-cloudintegration 2026-02-12 22:59:03 +05:30
Swapnil Nakade 22fcb7e9fb feat: updating cloud integration aws api 2026-02-12 22:53:26 +05:30
Swapnil Nakade e8d009d225 feat: adding logger in opts 2026-02-12 16:41:23 +05:30
Swapnil Nakade 25b143d21a Merge branch 'main' into refactor/aws-cloudintegration 2026-02-12 16:35:41 +05:30
Swapnil Nakade 4487050375 refactor: improving aws cloud integration apis structure 2026-02-12 16:34:16 +05:30
Swapnil Nakade f3732611ca refactor: aws cloud integration provider impl 2026-02-12 15:08:55 +05:30
Swapnil Nakade 989ca522f8 refactor: updating azure telemetry collections strat 2026-02-12 14:54:24 +05:30
Swapnil Nakade 9a2e9d76b5 feat: updating service definitions 2026-02-10 20:11:40 +05:30
Swapnil Nakade 2be42deecd Merge branch 'main' into feat/azure-integration 2026-02-10 18:44:40 +05:30
Swapnil Nakade 95cad880cc feat: adding default values for azure service config 2026-02-10 18:21:26 +05:30
Swapnil Nakade cfef1091b3 Merge branch 'main' into feat/azure-integration 2026-02-09 15:38:17 +05:30
Swapnil Nakade 4504c364f2 feat: adding comments 2026-02-09 15:37:10 +05:30
Swapnil Nakade 1a006870e1 refactor: removing wrong deps 2026-02-09 15:24:02 +05:30
Swapnil Nakade e7a27a1cfb Merge branch 'main' into feat/azure-integration 2026-02-09 15:19:36 +05:30
Swapnil Nakade 1e7323ead2 refactor: updating response for conn status 2026-02-08 22:56:04 +05:30
Swapnil Nakade af4c6c5b52 Merge branch 'main' into feat/azure-integration 2026-02-08 18:24:47 +05:30
Swapnil Nakade 02262ba245 refactor: wip 2026-02-08 17:28:04 +05:30
Swapnil Nakade df7c9e1339 chore: wip 2026-02-08 00:09:29 +05:30
Swapnil Nakade ac5e52479f refactor: removing comment 2026-02-06 17:48:49 +05:30
Swapnil Nakade de56477bbb Merge branch 'main' into feat/azure-integration 2026-02-06 17:45:40 +05:30
Swapnil Nakade fddd8a27fa fix: aws connection url generation 2026-02-06 17:44:06 +05:30
Swapnil Nakade 2aa4f8e237 feat: adding logs 2026-02-06 11:57:23 +05:30
Swapnil Nakade 74006a214b ci: fixing lint ci issues 2026-02-06 04:01:39 +05:30
Swapnil Nakade ed2cbacadc Merge branch 'main' into feat/azure-integration 2026-02-06 03:55:33 +05:30
Swapnil Nakade 3cbd529843 refactor: reverting 2026-02-06 03:51:29 +05:30
Swapnil Nakade 78b481e895 refactor: cloud integration API cleanup 2026-02-06 03:42:25 +05:30
Swapnil Nakade 215098ec0d refactor: sorting services list for consistency 2026-02-03 13:59:26 +05:30
Swapnil Nakade 5a4ef2e4ce refactor: code beautification 2026-02-03 01:16:58 +05:30
Swapnil Nakade b1f33c4f7f refactor: updating service details api 2026-02-03 00:52:27 +05:30
Swapnil Nakade 713c84b1e4 refactor: updating cloud provider type 2026-02-03 00:10:18 +05:30
Swapnil Nakade c3daf9e428 Merge branch 'main' into feat/azure-integration 2026-02-02 23:37:45 +05:30
Swapnil Nakade 70a908deb1 refactor: updating cloud-integration controller 2026-02-02 23:36:46 +05:30
Swapnil Nakade cc9cdded3c refactor: updating cloud integration apis 2026-02-02 21:50:15 +05:30
Swapnil Nakade 77067cd614 feat: extending cloud integration apis 2026-01-29 12:20:19 +05:30
Swapnil Nakade ab703d9a65 Merge branch 'main' into feat/azure-integration 2026-01-28 18:28:16 +05:30
Swapnil Nakade 611e8fbf9e refactor: updating azure cloud integrations api 2026-01-28 11:18:58 +05:30
117 changed files with 8398 additions and 7030 deletions

View File

@@ -41,23 +41,31 @@ services:
interval: 30s
timeout: 5s
retries: 3
telemetrystore-migrator:
image: signoz/signoz-otel-collector:v0.142.0
container_name: telemetrystore-migrator
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
entrypoint:
- /bin/sh
schema-migrator-sync:
image: signoz/signoz-schema-migrator:v0.142.0
container_name: schema-migrator-sync
command:
- -c
- |
/signoz-otel-collector migrate bootstrap &&
/signoz-otel-collector migrate sync up &&
/signoz-otel-collector migrate async up
- sync
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
image: signoz/signoz-schema-migrator:v0.142.0
container_name: schema-migrator-async
command:
- async
- --cluster-name=cluster
- --dsn=tcp://clickhouse:9000
- --replication=true
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
restart: on-failure

View File

@@ -1,23 +1,14 @@
services:
signoz-otel-collector:
image: signoz/signoz-otel-collector:v0.142.0
image: signoz/signoz-otel-collector:v0.129.6
container_name: signoz-otel-collector-dev
entrypoint:
- /bin/sh
command:
- -c
- |
/signoz-otel-collector migrate sync check &&
/signoz-otel-collector --config=/etc/otel-collector-config.yaml
- --config=/etc/otel-collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver

View File

@@ -48,13 +48,12 @@ jobs:
- role
- ttl
- alerts
- ingestionkeys
sqlstore-provider:
- postgres
- sqlite
clickhouse-version:
- 25.5.6
- 25.12.5
- 25.10.5
schema-migrator-version:
- v0.142.0
postgres-version:

View File

@@ -318,5 +318,4 @@ user:
# The password of the root user. Must meet password requirements.
password: ""
# The name of the organization to create or look up for the root user.
org:
name: default
org_name: default

View File

@@ -61,6 +61,7 @@ x-db-depend: &db-depend
- clickhouse
- clickhouse-2
- clickhouse-3
- schema-migrator
services:
init-clickhouse:
!!merge <<: *common
@@ -135,17 +136,12 @@ services:
# - "9000:9000"
# - "8123:8123"
# - "9181:9181"
configs:
- source: clickhouse-config
target: /etc/clickhouse-server/config.xml
- source: clickhouse-users
target: /etc/clickhouse-server/users.xml
- source: clickhouse-custom-function
target: /etc/clickhouse-server/custom-function.xml
- source: clickhouse-cluster
target: /etc/clickhouse-server/config.d/cluster.ha.xml
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-2:
@@ -155,17 +151,12 @@ services:
# - "9001:9000"
# - "8124:8123"
# - "9182:9181"
configs:
- source: clickhouse-config
target: /etc/clickhouse-server/config.xml
- source: clickhouse-users
target: /etc/clickhouse-server/users.xml
- source: clickhouse-custom-function
target: /etc/clickhouse-server/custom-function.xml
- source: clickhouse-cluster
target: /etc/clickhouse-server/config.d/cluster.ha.xml
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-2/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
clickhouse-3:
@@ -175,32 +166,37 @@ services:
# - "9002:9000"
# - "8125:8123"
# - "9183:9181"
configs:
- source: clickhouse-config
target: /etc/clickhouse-server/config.xml
- source: clickhouse-users
target: /etc/clickhouse-server/users.xml
- source: clickhouse-custom-function
target: /etc/clickhouse-server/custom-function.xml
- source: clickhouse-cluster
target: /etc/clickhouse-server/config.d/cluster.ha.xml
volumes:
- ../common/clickhouse/config.xml:/etc/clickhouse-server/config.xml
- ../common/clickhouse/users.xml:/etc/clickhouse-server/users.xml
- ../common/clickhouse/custom-function.xml:/etc/clickhouse-server/custom-function.xml
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
- ../common/clickhouse/cluster.ha.xml:/etc/clickhouse-server/config.d/cluster.xml
- ./clickhouse-setup/data/clickhouse-3/:/var/lib/clickhouse/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.112.1
image: signoz/signoz:v0.112.0
command:
- --config=/root/config/prometheus.yml
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- ./clickhouse-setup/data/signoz/:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -213,48 +209,40 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.142.1
entrypoint:
- /bin/sh
image: signoz/signoz-otel-collector:v0.142.0
command:
- -c
- |
/signoz-otel-collector migrate sync check &&
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
configs:
- source: otel-collector-config
target: /etc/otel-collector-config.yaml
- source: otel-manager-config
target: /etc/manager-config.yaml
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
deploy:
replicas: 3
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
entrypoint:
- /bin/sh
depends_on:
- clickhouse
- schema-migrator
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.142.0
deploy:
restart_policy:
condition: on-failure
delay: 5s
entrypoint: sh
command:
- -c
- |
/signoz-otel-collector migrate bootstrap &&
/signoz-otel-collector migrate sync up &&
/signoz-otel-collector migrate async up
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
depends_on:
- clickhouse
networks:
signoz-net:
name: signoz-net
@@ -273,16 +261,3 @@ volumes:
name: signoz-zookeeper-2
zookeeper-3:
name: signoz-zookeeper-3
configs:
clickhouse-config:
file: ../common/clickhouse/config.xml
clickhouse-users:
file: ../common/clickhouse/users.xml
clickhouse-custom-function:
file: ../common/clickhouse/custom-function.xml
clickhouse-cluster:
file: ../common/clickhouse/cluster.ha.xml
otel-collector-config:
file: ./otel-collector-config.yaml
otel-manager-config:
file: ../common/signoz/otel-collector-opamp-config.yaml

View File

@@ -58,6 +58,7 @@ x-db-depend: &db-depend
!!merge <<: *common
depends_on:
- clickhouse
- schema-migrator
services:
init-clickhouse:
!!merge <<: *common
@@ -113,20 +114,30 @@ services:
target: /etc/clickhouse-server/config.d/cluster.xml
volumes:
- clickhouse:/var/lib/clickhouse/
- ../common/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts/
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.112.1
image: signoz/signoz:v0.112.0
command:
- --config=/root/config/prometheus.yml
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- sqlite:/var/lib/signoz/
configs:
- source: signoz-prometheus-config
target: /root/config/prometheus.yml
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -139,14 +150,11 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.142.1
entrypoint:
- /bin/sh
image: signoz/signoz-otel-collector:v0.142.0
command:
- -c
- |
/signoz-otel-collector migrate sync check &&
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
configs:
- source: otel-collector-config
target: /etc/otel-collector-config.yaml
@@ -155,32 +163,29 @@ services:
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
deploy:
replicas: 3
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
entrypoint:
- /bin/sh
depends_on:
- clickhouse
- schema-migrator
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.142.0
deploy:
restart_policy:
condition: on-failure
delay: 5s
entrypoint: sh
command:
- -c
- |
/signoz-otel-collector migrate bootstrap &&
/signoz-otel-collector migrate sync up &&
/signoz-otel-collector migrate async up
- "/signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up="
depends_on:
- clickhouse
networks:
signoz-net:
name: signoz-net
@@ -200,6 +205,14 @@ configs:
file: ../common/clickhouse/custom-function.xml
clickhouse-cluster:
file: ../common/clickhouse/cluster.xml
signoz-prometheus-config:
file: ../common/signoz/prometheus.yml
# If you have multiple dashboard files, you can list them individually:
# dashboard-foo:
# file: ../common/dashboards/foo.json
# dashboard-bar:
# file: ../common/dashboards/bar.json
otel-collector-config:
file: ./otel-collector-config.yaml
otel-manager-config:

View File

@@ -62,10 +62,8 @@ x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
clickhouse-2:
condition: service_healthy
clickhouse-3:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
@@ -181,17 +179,27 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.112.1}
image: signoz/signoz:${VERSION:-v0.112.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -202,48 +210,51 @@ services:
interval: 30s
timeout: 5s
retries: 3
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.1}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
container_name: signoz-otel-collector
entrypoint:
- /bin/sh
command:
- -c
- |
/signoz-otel-collector migrate sync check &&
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
container_name: signoz-telemetrystore-migrator
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
entrypoint:
- /bin/sh
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
container_name: schema-migrator-sync
command:
- -c
- |
/signoz-otel-collector migrate bootstrap &&
/signoz-otel-collector migrate sync up &&
/signoz-otel-collector migrate async up
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:

View File

@@ -57,6 +57,8 @@ x-db-depend: &db-depend
depends_on:
clickhouse:
condition: service_healthy
schema-migrator-sync:
condition: service_completed_successfully
services:
init-clickhouse:
!!merge <<: *common
@@ -109,17 +111,27 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.112.1}
image: signoz/signoz:${VERSION:-v0.112.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port
volumes:
- ../common/signoz/prometheus.yml:/root/config/prometheus.yml
- ../common/dashboards:/root/config/dashboards
- sqlite:/var/lib/signoz/
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- SIGNOZ_TOKENIZER_JWT_SECRET=secret
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-standalone-amd
- DOT_METRICS_ENABLED=true
healthcheck:
test:
- CMD
@@ -132,46 +144,45 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.1}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
container_name: signoz-otel-collector
entrypoint:
- /bin/sh
command:
- -c
- |
/signoz-otel-collector migrate sync check &&
/signoz-otel-collector --config=/etc/otel-collector-config.yaml --manager-config=/etc/manager-config.yaml --copy-path=/var/tmp/collector-config.yaml
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
- ../common/signoz/otel-collector-opamp-config.yaml:/etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name=signoz-host,os.type=linux
- LOW_CARDINAL_EXCEPTION_GROUPING=false
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
ports:
# - "1777:1777" # pprof extension
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
signoz-telemetrystore-migrator:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.142.0}
container_name: signoz-telemetrystore-migrator
environment:
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_CLUSTER=cluster
- SIGNOZ_OTEL_COLLECTOR_CLICKHOUSE_REPLICATION=true
- SIGNOZ_OTEL_COLLECTOR_TIMEOUT=10m
entrypoint:
- /bin/sh
depends_on:
signoz:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
container_name: schema-migrator-sync
command:
- -c
- |
/signoz-otel-collector migrate bootstrap &&
/signoz-otel-collector migrate sync up &&
/signoz-otel-collector migrate async up
- sync
- --dsn=tcp://clickhouse:9000
- --up=
depends_on:
clickhouse:
condition: service_healthy
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.142.0}
container_name: schema-migrator-async
command:
- async
- --dsn=tcp://clickhouse:9000
- --up=
restart: on-failure
networks:
signoz-net:

View File

@@ -80,37 +80,6 @@ components:
updatedAt:
format: date-time
type: string
required:
- id
type: object
AuthtypesGettableObjects:
properties:
resource:
$ref: '#/components/schemas/AuthtypesResource'
selectors:
items:
type: string
type: array
required:
- resource
- selectors
type: object
AuthtypesGettableResources:
properties:
relations:
additionalProperties:
items:
type: string
type: array
nullable: true
type: object
resources:
items:
$ref: '#/components/schemas/AuthtypesResource'
type: array
required:
- resources
- relations
type: object
AuthtypesGettableToken:
properties:
@@ -161,6 +130,8 @@ components:
serviceAccountJson:
type: string
type: object
AuthtypesName:
type: object
AuthtypesOIDCConfig:
properties:
claimMapping:
@@ -183,7 +154,7 @@ components:
resource:
$ref: '#/components/schemas/AuthtypesResource'
selector:
type: string
$ref: '#/components/schemas/AuthtypesSelector'
required:
- resource
- selector
@@ -204,22 +175,6 @@ components:
provider:
type: string
type: object
AuthtypesPatchableObjects:
properties:
additions:
items:
$ref: '#/components/schemas/AuthtypesGettableObjects'
nullable: true
type: array
deletions:
items:
$ref: '#/components/schemas/AuthtypesGettableObjects'
nullable: true
type: array
required:
- additions
- deletions
type: object
AuthtypesPostableAuthDomain:
properties:
config:
@@ -244,7 +199,7 @@ components:
AuthtypesResource:
properties:
name:
type: string
$ref: '#/components/schemas/AuthtypesName'
type:
type: string
required:
@@ -276,6 +231,8 @@ components:
samlIdp:
type: string
type: object
AuthtypesSelector:
type: object
AuthtypesSessionContext:
properties:
exists:
@@ -288,6 +245,8 @@ components:
type: object
AuthtypesTransaction:
properties:
id:
type: string
object:
$ref: '#/components/schemas/AuthtypesObject'
relation:
@@ -501,10 +460,10 @@ components:
GatewaytypesLimitValue:
properties:
count:
nullable: true
format: int64
type: integer
size:
nullable: true
format: int64
type: integer
type: object
GatewaytypesPagination:
@@ -1709,6 +1668,40 @@ components:
- status
- error
type: object
RoletypesGettableResources:
properties:
relations:
additionalProperties:
items:
type: string
type: array
nullable: true
type: object
resources:
items:
$ref: '#/components/schemas/AuthtypesResource'
nullable: true
type: array
required:
- resources
- relations
type: object
RoletypesPatchableObjects:
properties:
additions:
items:
$ref: '#/components/schemas/AuthtypesObject'
nullable: true
type: array
deletions:
items:
$ref: '#/components/schemas/AuthtypesObject'
nullable: true
type: array
required:
- additions
- deletions
type: object
RoletypesPatchableRole:
properties:
description:
@@ -1744,7 +1737,6 @@ components:
format: date-time
type: string
required:
- id
- name
- description
- type
@@ -1882,8 +1874,6 @@ components:
$ref: '#/components/schemas/TypesUser'
userId:
type: string
required:
- id
type: object
TypesGettableGlobalConfig:
properties:
@@ -1896,8 +1886,6 @@ components:
properties:
id:
type: string
required:
- id
type: object
TypesInvite:
properties:
@@ -1921,8 +1909,6 @@ components:
updatedAt:
format: date-time
type: string
required:
- id
type: object
TypesOrganization:
properties:
@@ -1943,8 +1929,6 @@ components:
updatedAt:
format: date-time
type: string
required:
- id
type: object
TypesPostableAPIKey:
properties:
@@ -2008,8 +1992,6 @@ components:
type: string
token:
type: string
required:
- id
type: object
TypesStorableAPIKey:
properties:
@@ -2035,8 +2017,6 @@ components:
type: string
userId:
type: string
required:
- id
type: object
TypesUser:
properties:
@@ -2058,8 +2038,6 @@ components:
updatedAt:
format: date-time
type: string
required:
- id
type: object
ZeustypesGettableHost:
properties:
@@ -2192,35 +2170,6 @@ paths:
summary: Check permissions
tags:
- authz
/api/v1/authz/resources:
get:
deprecated: false
description: Gets all the available resources
operationId: AuthzResources
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/AuthtypesGettableResources'
status:
type: string
required:
- status
- data
type: object
description: OK
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
summary: Get resources
tags:
- authz
/api/v1/changePassword/{id}:
post:
deprecated: false
@@ -4393,7 +4342,7 @@ paths:
properties:
data:
items:
$ref: '#/components/schemas/AuthtypesGettableObjects'
$ref: '#/components/schemas/AuthtypesObject'
type: array
status:
type: string
@@ -4466,7 +4415,7 @@ paths:
content:
application/json:
schema:
$ref: '#/components/schemas/AuthtypesPatchableObjects'
$ref: '#/components/schemas/RoletypesPatchableObjects'
responses:
"204":
content:
@@ -4524,6 +4473,52 @@ paths:
summary: Patch objects for a role by relation
tags:
- role
/api/v1/roles/resources:
get:
deprecated: false
description: Gets all the available resources for role assignment
operationId: GetResources
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/RoletypesGettableResources'
status:
type: string
required:
- status
- data
type: object
description: OK
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- ADMIN
- tokenizer:
- ADMIN
summary: Get resources
tags:
- role
/api/v1/user:
get:
deprecated: false
@@ -5096,7 +5091,7 @@ paths:
schema:
$ref: '#/components/schemas/GatewaytypesPostableIngestionKey'
responses:
"201":
"200":
content:
application/json:
schema:
@@ -5109,7 +5104,7 @@ paths:
- status
- data
type: object
description: Created
description: OK
"401":
content:
application/json:
@@ -5537,12 +5532,6 @@ paths:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"404":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Not Found
"500":
content:
application/json:
@@ -5612,12 +5601,6 @@ paths:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"404":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Not Found
"500":
content:
application/json:
@@ -5676,12 +5659,6 @@ paths:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"404":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Not Found
"500":
content:
application/json:
@@ -5741,12 +5718,6 @@ paths:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"404":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Not Found
"500":
content:
application/json:

View File

@@ -171,6 +171,8 @@ func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource
for _, register := range provider.registry {
typeables = append(typeables, register.MustGetTypeables()...)
}
// role module cannot self register itself!
typeables = append(typeables, provider.MustGetTypeables()...)
resources := make([]*authtypes.Resource, 0)
for _, typeable := range typeables {
@@ -257,7 +259,7 @@ func (provider *provider) Delete(ctx context.Context, orgID valuer.UUID, id valu
}
role := roletypes.NewRoleFromStorableRole(storableRole)
err = role.ErrIfManaged()
err = role.CanEditDelete()
if err != nil {
return err
}

View File

@@ -1,6 +1,7 @@
package api
import (
"log/slog"
"net/http"
"time"
@@ -10,7 +11,6 @@ import (
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/http/middleware"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/interfaces"
@@ -27,12 +27,12 @@ type APIHandlerOptions struct {
RulesManager *rules.Manager
UsageManager *usage.Manager
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
GatewayUrl string
// Querier Influx Interval
FluxInterval time.Duration
GlobalConfig global.Config
Logger *slog.Logger // this is present in Signoz.Instrumentation but adding for quick access
}
type APIHandler struct {
@@ -46,13 +46,13 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz, config signoz.
Reader: opts.DataConnector,
RuleManager: opts.RulesManager,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
FluxInterval: opts.FluxInterval,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
LicensingAPI: httplicensing.NewLicensingAPI(signoz.Licensing),
Signoz: signoz,
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
Logger: opts.Logger,
}, config)
if err != nil {
@@ -101,14 +101,12 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
}
func (ah *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
ah.APIHandler.RegisterCloudIntegrationsRoutes(router, am)
router.HandleFunc(
"/api/v1/cloud-integrations/{cloudProvider}/accounts/generate-connection-params",
am.EditAccess(ah.CloudIntegrationsGenerateConnectionParams),
).Methods(http.MethodGet)
}
func (ah *APIHandler) getVersion(w http.ResponseWriter, r *http.Request) {

View File

@@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"strings"
"time"
@@ -13,20 +14,14 @@ import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/modules/user"
basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
"go.uber.org/zap"
)
type CloudIntegrationConnectionParamsResponse struct {
IngestionUrl string `json:"ingestion_url,omitempty"`
IngestionKey string `json:"ingestion_key,omitempty"`
SigNozAPIUrl string `json:"signoz_api_url,omitempty"`
SigNozAPIKey string `json:"signoz_api_key,omitempty"`
}
// TODO: move this file with other cloud integration related code
func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
@@ -41,23 +36,21 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
return
}
cloudProvider := mux.Vars(r)["cloudProvider"]
if cloudProvider != "aws" {
RespondError(w, basemodel.BadRequest(fmt.Errorf(
"cloud provider not supported: %s", cloudProvider,
)), nil)
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
apiKey, apiErr := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't provision PAT for cloud integration:",
), nil)
apiKey, err := ah.getOrCreateCloudIntegrationPAT(r.Context(), claims.OrgID, cloudProvider)
if err != nil {
render.Error(w, err)
return
}
result := CloudIntegrationConnectionParamsResponse{
result := integrationtypes.GettableCloudIntegrationConnectionParams{
SigNozAPIKey: apiKey,
}
@@ -71,16 +64,17 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
// Return the API Key (PAT) even if the rest of the params can not be deduced.
// Params not returned from here will be requested from the user via form inputs.
// This enables gracefully degraded but working experience even for non-cloud deployments.
zap.L().Info("ingestion params and signoz api url can not be deduced since no license was found")
ah.Respond(w, result)
ah.opts.Logger.InfoContext(
r.Context(),
"ingestion params and signoz api url can not be deduced since no license was found",
)
render.Success(w, http.StatusOK, result)
return
}
signozApiUrl, apiErr := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't deduce ingestion url and signoz api url",
), nil)
signozApiUrl, err := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
if err != nil {
render.Error(w, err)
return
}
@@ -89,48 +83,41 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
gatewayUrl := ah.opts.GatewayUrl
if len(gatewayUrl) > 0 {
ingestionKey, apiErr := getOrCreateCloudProviderIngestionKey(
ingestionKeyString, err := ah.getOrCreateCloudProviderIngestionKey(
r.Context(), gatewayUrl, license.Key, cloudProvider,
)
if apiErr != nil {
RespondError(w, basemodel.WrapApiError(
apiErr, "couldn't get or create ingestion key",
), nil)
if err != nil {
render.Error(w, err)
return
}
result.IngestionKey = ingestionKey
result.IngestionKey = ingestionKeyString
} else {
zap.L().Info("ingestion key can't be deduced since no gateway url has been configured")
ah.opts.Logger.InfoContext(
r.Context(),
"ingestion key can't be deduced since no gateway url has been configured",
)
}
ah.Respond(w, result)
render.Success(w, http.StatusOK, result)
}
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider string) (
string, *basemodel.ApiError,
) {
func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId string, cloudProvider valuer.String) (string, error) {
integrationPATName := fmt.Sprintf("%s integration", cloudProvider)
integrationUser, apiErr := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
if apiErr != nil {
return "", apiErr
integrationUser, err := ah.getOrCreateCloudIntegrationUser(ctx, orgId, cloudProvider)
if err != nil {
return "", err
}
orgIdUUID, err := valuer.NewUUID(orgId)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't parse orgId: %w", err,
))
return "", err
}
allPats, err := ah.Signoz.Modules.User.ListAPIKeys(ctx, orgIdUUID)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't list PATs: %w", err,
))
return "", err
}
for _, p := range allPats {
if p.UserID == integrationUser.ID && p.Name == integrationPATName {
@@ -138,9 +125,10 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
}
}
zap.L().Info(
ah.opts.Logger.InfoContext(
ctx,
"no PAT found for cloud integration, creating a new one",
zap.String("cloudProvider", cloudProvider),
slog.String("cloudProvider", cloudProvider.String()),
)
newPAT, err := types.NewStorableAPIKey(
@@ -150,68 +138,48 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
0,
)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
))
return "", err
}
err = ah.Signoz.Modules.User.CreateAPIKey(ctx, newPAT)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloud integration PAT: %w", err,
))
return "", err
}
return newPAT.Token, nil
}
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
// TODO: move this function out of handler and use proper module structure
func (ah *APIHandler) getOrCreateCloudIntegrationUser(ctx context.Context, orgId string, cloudProvider valuer.String) (*types.User, error) {
cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider.String())
email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))
cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
return nil, err
}
password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
return nil, err
}
return cloudIntegrationUser, nil
}
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
string, *basemodel.ApiError,
) {
// TODO: remove this struct from here
type deploymentResponse struct {
Name string `json:"name"`
ClusterInfo struct {
Region struct {
DNS string `json:"dns"`
} `json:"region"`
} `json:"cluster"`
}
// TODO: move this function out of handler and use proper module structure
func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (string, error) {
respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't query for deployment info: error: %w", err,
))
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't query for deployment info: error")
}
resp := new(deploymentResponse)
resp := new(integrationtypes.GettableDeployment)
err = json.Unmarshal(respBytes, resp)
if err != nil {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't unmarshal deployment info response: error: %w", err,
))
return "", errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal deployment info response")
}
regionDns := resp.ClusterInfo.Region.DNS
@@ -219,9 +187,10 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
if len(regionDns) < 1 || len(deploymentName) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
return "", errors.NewInternalf(
errors.CodeInternal,
"deployment info response not in expected shape. couldn't determine region dns and deployment name",
))
)
}
signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)
@@ -229,102 +198,85 @@ func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licens
return signozApiUrl, nil
}
type ingestionKey struct {
Name string `json:"name"`
Value string `json:"value"`
// other attributes from gateway response not included here since they are not being used.
}
type ingestionKeysSearchResponse struct {
Status string `json:"status"`
Data []ingestionKey `json:"data"`
Error string `json:"error"`
}
type createIngestionKeyResponse struct {
Status string `json:"status"`
Data ingestionKey `json:"data"`
Error string `json:"error"`
}
func getOrCreateCloudProviderIngestionKey(
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider string,
) (string, *basemodel.ApiError) {
func (ah *APIHandler) getOrCreateCloudProviderIngestionKey(
ctx context.Context, gatewayUrl string, licenseKey string, cloudProvider valuer.String,
) (string, error) {
cloudProviderKeyName := fmt.Sprintf("%s-integration", cloudProvider)
// see if the key already exists
searchResult, apiErr := requestGateway[ingestionKeysSearchResponse](
searchResult, err := requestGateway[integrationtypes.GettableIngestionKeysSearch](
ctx,
gatewayUrl,
licenseKey,
fmt.Sprintf("/v1/workspaces/me/keys/search?name=%s", cloudProviderKeyName),
nil,
ah.opts.Logger,
)
if apiErr != nil {
return "", basemodel.WrapApiError(
apiErr, "couldn't search for cloudprovider ingestion key",
)
if err != nil {
return "", err
}
if searchResult.Status != "success" {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't search for cloudprovider ingestion key: status: %s, error: %s",
return "", errors.NewInternalf(
errors.CodeInternal,
"couldn't search for cloud provider ingestion key: status: %s, error: %s",
searchResult.Status, searchResult.Error,
))
}
for _, k := range searchResult.Data {
if k.Name == cloudProviderKeyName {
if len(k.Value) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
"ingestion keys search response not as expected",
))
}
return k.Value, nil
}
}
zap.L().Info(
"no existing ingestion key found for cloud integration, creating a new one",
zap.String("cloudProvider", cloudProvider),
)
createKeyResult, apiErr := requestGateway[createIngestionKeyResponse](
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
map[string]any{
"name": cloudProviderKeyName,
"tags": []string{"integration", cloudProvider},
},
)
if apiErr != nil {
return "", basemodel.WrapApiError(
apiErr, "couldn't create cloudprovider ingestion key",
)
}
for _, k := range searchResult.Data {
if k.Name != cloudProviderKeyName {
continue
}
if len(k.Value) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", errors.NewInternalf(errors.CodeInternal, "ingestion keys search response not as expected")
}
return k.Value, nil
}
ah.opts.Logger.InfoContext(
ctx,
"no existing ingestion key found for cloud integration, creating a new one",
slog.String("cloudProvider", cloudProvider.String()),
)
createKeyResult, err := requestGateway[integrationtypes.GettableCreateIngestionKey](
ctx, gatewayUrl, licenseKey, "/v1/workspaces/me/keys",
map[string]any{
"name": cloudProviderKeyName,
"tags": []string{"integration", cloudProvider.String()},
},
ah.opts.Logger,
)
if err != nil {
return "", err
}
if createKeyResult.Status != "success" {
return "", basemodel.InternalError(fmt.Errorf(
"couldn't create cloudprovider ingestion key: status: %s, error: %s",
return "", errors.NewInternalf(
errors.CodeInternal,
"couldn't create cloud provider ingestion key: status: %s, error: %s",
createKeyResult.Status, createKeyResult.Error,
))
)
}
ingestionKey := createKeyResult.Data.Value
if len(ingestionKey) < 1 {
ingestionKeyString := createKeyResult.Data.Value
if len(ingestionKeyString) < 1 {
// Fail early if actual response structure and expectation here ever diverge
return "", basemodel.InternalError(fmt.Errorf(
return "", errors.NewInternalf(errors.CodeInternal,
"ingestion key creation response not as expected",
))
)
}
return ingestionKey, nil
return ingestionKeyString, nil
}
func requestGateway[ResponseType any](
ctx context.Context, gatewayUrl string, licenseKey string, path string, payload any,
) (*ResponseType, *basemodel.ApiError) {
ctx context.Context, gatewayUrl, licenseKey, path string, payload any, logger *slog.Logger,
) (*ResponseType, error) {
baseUrl := strings.TrimSuffix(gatewayUrl, "/")
reqUrl := fmt.Sprintf("%s%s", baseUrl, path)
@@ -335,13 +287,12 @@ func requestGateway[ResponseType any](
"X-Consumer-Groups": "ns:default",
}
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload)
return requestAndParseResponse[ResponseType](ctx, reqUrl, headers, payload, logger)
}
func requestAndParseResponse[ResponseType any](
ctx context.Context, url string, headers map[string]string, payload any,
) (*ResponseType, *basemodel.ApiError) {
ctx context.Context, url string, headers map[string]string, payload any, logger *slog.Logger,
) (*ResponseType, error) {
reqMethod := http.MethodGet
var reqBody io.Reader
if payload != nil {
@@ -349,18 +300,14 @@ func requestAndParseResponse[ResponseType any](
bodyJson, err := json.Marshal(payload)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't serialize request payload to JSON: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal payload")
}
reqBody = bytes.NewBuffer([]byte(bodyJson))
reqBody = bytes.NewBuffer(bodyJson)
}
req, err := http.NewRequestWithContext(ctx, reqMethod, url, reqBody)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't prepare request: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't create req")
}
for k, v := range headers {
@@ -373,23 +320,26 @@ func requestAndParseResponse[ResponseType any](
response, err := client.Do(req)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't make request: %w", err))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't make req")
}
defer response.Body.Close()
defer func() {
err = response.Body.Close()
if err != nil {
logger.ErrorContext(ctx, "couldn't close response body", "error", err)
}
}()
respBody, err := io.ReadAll(response.Body)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf("couldn't read response: %w", err))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read response body")
}
var resp ResponseType
err = json.Unmarshal(respBody, &resp)
if err != nil {
return nil, basemodel.InternalError(fmt.Errorf(
"couldn't unmarshal gateway response into %T", resp,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal response body")
}
return &resp, nil

View File

@@ -37,7 +37,6 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
@@ -121,13 +120,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
)
}
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
if err != nil {
return nil, fmt.Errorf(
"couldn't create cloud provider integrations controller: %w", err,
)
}
// ingestion pipelines manager
logParsingPipelineController, err := logparsingpipeline.NewLogParsingPipelinesController(
signoz.SQLStore,
@@ -161,11 +153,11 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
RulesManager: rm,
UsageManager: usageManager,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
FluxInterval: config.Querier.FluxInterval,
GatewayUrl: config.Gateway.URL.String(),
GlobalConfig: config.Global,
Logger: signoz.Instrumentation.Logger(),
}
apiHandler, err := api.NewAPIHandler(apiOpts, signoz, config)

View File

@@ -308,15 +308,3 @@ export const PublicDashboardPage = Loadable(
/* webpackChunkName: "Public Dashboard Page" */ 'pages/PublicDashboard'
),
);
export const AlertTypeSelectionPage = Loadable(
() =>
import(
/* webpackChunkName: "Alert Type Selection Page" */ 'pages/AlertTypeSelection'
),
);
export const MeterExplorerPage = Loadable(
() =>
import(/* webpackChunkName: "Meter Explorer Page" */ 'pages/MeterExplorer'),
);

View File

@@ -1,10 +1,12 @@
import { RouteProps } from 'react-router-dom';
import ROUTES from 'constants/routes';
import AlertTypeSelectionPage from 'pages/AlertTypeSelection';
import MessagingQueues from 'pages/MessagingQueues';
import MeterExplorer from 'pages/MeterExplorer';
import {
AlertHistory,
AlertOverview,
AlertTypeSelectionPage,
AllAlertChannels,
AllErrors,
ApiMonitoring,
@@ -27,8 +29,6 @@ import {
LogsExplorer,
LogsIndexToFields,
LogsSaveViews,
MessagingQueuesMainPage,
MeterExplorerPage,
MetricsExplorer,
OldLogsExplorer,
Onboarding,
@@ -399,28 +399,28 @@ const routes: AppRoutes[] = [
{
path: ROUTES.MESSAGING_QUEUES_KAFKA,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_KAFKA',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_CELERY_TASK,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_CELERY_TASK',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_OVERVIEW,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_OVERVIEW',
isPrivate: true,
},
{
path: ROUTES.MESSAGING_QUEUES_KAFKA_DETAIL,
exact: true,
component: MessagingQueuesMainPage,
component: MessagingQueues,
key: 'MESSAGING_QUEUES_KAFKA_DETAIL',
isPrivate: true,
},
@@ -463,21 +463,21 @@ const routes: AppRoutes[] = [
{
path: ROUTES.METER,
exact: true,
component: MeterExplorerPage,
component: MeterExplorer,
key: 'METER',
isPrivate: true,
},
{
path: ROUTES.METER_EXPLORER,
exact: true,
component: MeterExplorerPage,
component: MeterExplorer,
key: 'METER_EXPLORER',
isPrivate: true,
},
{
path: ROUTES.METER_EXPLORER_VIEWS,
exact: true,
component: MeterExplorerPage,
component: MeterExplorer,
key: 'METER_EXPLORER_VIEWS',
isPrivate: true,
},


@@ -5,24 +5,17 @@
* SigNoz
*/
import type {
InvalidateOptions,
MutationFunction,
QueryClient,
QueryFunction,
QueryKey,
UseMutationOptions,
UseMutationResult,
UseQueryOptions,
UseQueryResult,
} from 'react-query';
import { useMutation, useQuery } from 'react-query';
import { useMutation } from 'react-query';
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
AuthtypesTransactionDTO,
AuthzCheck200,
AuthzResources200,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
@@ -113,88 +106,3 @@ export const useAuthzCheck = <
return useMutation(mutationOptions);
};
/**
* Gets all the available resources
* @summary Get resources
*/
export const authzResources = (signal?: AbortSignal) => {
return GeneratedAPIInstance<AuthzResources200>({
url: `/api/v1/authz/resources`,
method: 'GET',
signal,
});
};
export const getAuthzResourcesQueryKey = () => {
return [`/api/v1/authz/resources`] as const;
};
export const getAuthzResourcesQueryOptions = <
TData = Awaited<ReturnType<typeof authzResources>>,
TError = ErrorType<RenderErrorResponseDTO>
>(options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof authzResources>>,
TError,
TData
>;
}) => {
const { query: queryOptions } = options ?? {};
const queryKey = queryOptions?.queryKey ?? getAuthzResourcesQueryKey();
const queryFn: QueryFunction<Awaited<ReturnType<typeof authzResources>>> = ({
signal,
}) => authzResources(signal);
return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
Awaited<ReturnType<typeof authzResources>>,
TError,
TData
> & { queryKey: QueryKey };
};
export type AuthzResourcesQueryResult = NonNullable<
Awaited<ReturnType<typeof authzResources>>
>;
export type AuthzResourcesQueryError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Get resources
*/
export function useAuthzResources<
TData = Awaited<ReturnType<typeof authzResources>>,
TError = ErrorType<RenderErrorResponseDTO>
>(options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof authzResources>>,
TError,
TData
>;
}): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
const queryOptions = getAuthzResourcesQueryOptions(options);
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
queryKey: QueryKey;
};
query.queryKey = queryOptions.queryKey;
return query;
}
/**
* @summary Get resources
*/
export const invalidateAuthzResources = async (
queryClient: QueryClient,
options?: InvalidateOptions,
): Promise<QueryClient> => {
await queryClient.invalidateQueries(
{ queryKey: getAuthzResourcesQueryKey() },
options,
);
return queryClient;
};


@@ -20,7 +20,7 @@ import { useMutation, useQuery } from 'react-query';
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
CreateIngestionKey201,
CreateIngestionKey200,
CreateIngestionKeyLimit201,
CreateIngestionKeyLimitPathParameters,
DeleteIngestionKeyLimitPathParameters,
@@ -151,7 +151,7 @@ export const createIngestionKey = (
gatewaytypesPostableIngestionKeyDTO: BodyType<GatewaytypesPostableIngestionKeyDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<CreateIngestionKey201>({
return GeneratedAPIInstance<CreateIngestionKey200>({
url: `/api/v2/gateway/ingestion_keys`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },


@@ -20,17 +20,18 @@ import { useMutation, useQuery } from 'react-query';
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
AuthtypesPatchableObjectsDTO,
CreateRole201,
DeleteRolePathParameters,
GetObjects200,
GetObjectsPathParameters,
GetResources200,
GetRole200,
GetRolePathParameters,
ListRoles200,
PatchObjectsPathParameters,
PatchRolePathParameters,
RenderErrorResponseDTO,
RoletypesPatchableObjectsDTO,
RoletypesPatchableRoleDTO,
RoletypesPostableRoleDTO,
} from '../sigNoz.schemas';
@@ -574,13 +575,13 @@ export const invalidateGetObjects = async (
*/
export const patchObjects = (
{ id, relation }: PatchObjectsPathParameters,
authtypesPatchableObjectsDTO: BodyType<AuthtypesPatchableObjectsDTO>,
roletypesPatchableObjectsDTO: BodyType<RoletypesPatchableObjectsDTO>,
) => {
return GeneratedAPIInstance<string>({
url: `/api/v1/roles/${id}/relation/${relation}/objects`,
method: 'PATCH',
headers: { 'Content-Type': 'application/json' },
data: authtypesPatchableObjectsDTO,
data: roletypesPatchableObjectsDTO,
});
};
@@ -593,7 +594,7 @@ export const getPatchObjectsMutationOptions = <
TError,
{
pathParams: PatchObjectsPathParameters;
data: BodyType<AuthtypesPatchableObjectsDTO>;
data: BodyType<RoletypesPatchableObjectsDTO>;
},
TContext
>;
@@ -602,7 +603,7 @@ export const getPatchObjectsMutationOptions = <
TError,
{
pathParams: PatchObjectsPathParameters;
data: BodyType<AuthtypesPatchableObjectsDTO>;
data: BodyType<RoletypesPatchableObjectsDTO>;
},
TContext
> => {
@@ -619,7 +620,7 @@ export const getPatchObjectsMutationOptions = <
Awaited<ReturnType<typeof patchObjects>>,
{
pathParams: PatchObjectsPathParameters;
data: BodyType<AuthtypesPatchableObjectsDTO>;
data: BodyType<RoletypesPatchableObjectsDTO>;
}
> = (props) => {
const { pathParams, data } = props ?? {};
@@ -633,7 +634,7 @@ export const getPatchObjectsMutationOptions = <
export type PatchObjectsMutationResult = NonNullable<
Awaited<ReturnType<typeof patchObjects>>
>;
export type PatchObjectsMutationBody = BodyType<AuthtypesPatchableObjectsDTO>;
export type PatchObjectsMutationBody = BodyType<RoletypesPatchableObjectsDTO>;
export type PatchObjectsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
@@ -648,7 +649,7 @@ export const usePatchObjects = <
TError,
{
pathParams: PatchObjectsPathParameters;
data: BodyType<AuthtypesPatchableObjectsDTO>;
data: BodyType<RoletypesPatchableObjectsDTO>;
},
TContext
>;
@@ -657,7 +658,7 @@ export const usePatchObjects = <
TError,
{
pathParams: PatchObjectsPathParameters;
data: BodyType<AuthtypesPatchableObjectsDTO>;
data: BodyType<RoletypesPatchableObjectsDTO>;
},
TContext
> => {
@@ -665,3 +666,88 @@ export const usePatchObjects = <
return useMutation(mutationOptions);
};
/**
* Gets all the available resources for role assignment
* @summary Get resources
*/
export const getResources = (signal?: AbortSignal) => {
return GeneratedAPIInstance<GetResources200>({
url: `/api/v1/roles/resources`,
method: 'GET',
signal,
});
};
export const getGetResourcesQueryKey = () => {
return [`/api/v1/roles/resources`] as const;
};
export const getGetResourcesQueryOptions = <
TData = Awaited<ReturnType<typeof getResources>>,
TError = ErrorType<RenderErrorResponseDTO>
>(options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof getResources>>,
TError,
TData
>;
}) => {
const { query: queryOptions } = options ?? {};
const queryKey = queryOptions?.queryKey ?? getGetResourcesQueryKey();
const queryFn: QueryFunction<Awaited<ReturnType<typeof getResources>>> = ({
signal,
}) => getResources(signal);
return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
Awaited<ReturnType<typeof getResources>>,
TError,
TData
> & { queryKey: QueryKey };
};
export type GetResourcesQueryResult = NonNullable<
Awaited<ReturnType<typeof getResources>>
>;
export type GetResourcesQueryError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary Get resources
*/
export function useGetResources<
TData = Awaited<ReturnType<typeof getResources>>,
TError = ErrorType<RenderErrorResponseDTO>
>(options?: {
query?: UseQueryOptions<
Awaited<ReturnType<typeof getResources>>,
TError,
TData
>;
}): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
const queryOptions = getGetResourcesQueryOptions(options);
const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
queryKey: QueryKey;
};
query.queryKey = queryOptions.queryKey;
return query;
}
/**
* @summary Get resources
*/
export const invalidateGetResources = async (
queryClient: QueryClient,
options?: InvalidateOptions,
): Promise<QueryClient> => {
await queryClient.invalidateQueries(
{ queryKey: getGetResourcesQueryKey() },
options,
);
return queryClient;
};
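Note: a hedged usage sketch for the regenerated getResources hooks above. The component name and the module path './roles' are assumptions; the hooks and the invalidation helper are exactly as generated above:

import { useQueryClient } from 'react-query';
import { invalidateGetResources, useGetResources } from './roles';

function ResourceCount(): JSX.Element {
	const queryClient = useQueryClient();
	const { data, isLoading } = useGetResources();
	// Force a refetch of the resources list on demand.
	const refresh = (): Promise<unknown> => invalidateGetResources(queryClient);

	if (isLoading) {
		return <span>Loading…</span>;
	}
	return (
		<button type="button" onClick={refresh}>
			{data?.data.resources?.length ?? 0} resources
		</button>
	);
}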


@@ -81,7 +81,7 @@ export interface AuthtypesGettableAuthDomainDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type string
*/
@@ -108,33 +108,6 @@ export interface AuthtypesGettableAuthDomainDTO {
updatedAt?: Date;
}
export interface AuthtypesGettableObjectsDTO {
resource: AuthtypesResourceDTO;
/**
* @type array
*/
selectors: string[];
}
/**
* @nullable
*/
export type AuthtypesGettableResourcesDTORelations = {
[key: string]: string[];
} | null;
export interface AuthtypesGettableResourcesDTO {
/**
* @type object
* @nullable true
*/
relations: AuthtypesGettableResourcesDTORelations;
/**
* @type array
*/
resources: AuthtypesResourceDTO[];
}
export interface AuthtypesGettableTokenDTO {
/**
* @type string
@@ -209,6 +182,10 @@ export interface AuthtypesGoogleConfigDTO {
serviceAccountJson?: string;
}
export interface AuthtypesNameDTO {
[key: string]: unknown;
}
export interface AuthtypesOIDCConfigDTO {
claimMapping?: AuthtypesAttributeMappingDTO;
/**
@@ -239,10 +216,7 @@ export interface AuthtypesOIDCConfigDTO {
export interface AuthtypesObjectDTO {
resource: AuthtypesResourceDTO;
/**
* @type string
*/
selector: string;
selector: AuthtypesSelectorDTO;
}
export interface AuthtypesOrgSessionContextDTO {
@@ -265,19 +239,6 @@ export interface AuthtypesPasswordAuthNSupportDTO {
provider?: string;
}
export interface AuthtypesPatchableObjectsDTO {
/**
* @type array
* @nullable true
*/
additions: AuthtypesGettableObjectsDTO[] | null;
/**
* @type array
* @nullable true
*/
deletions: AuthtypesGettableObjectsDTO[] | null;
}
export interface AuthtypesPostableAuthDomainDTO {
config?: AuthtypesAuthDomainConfigDTO;
/**
@@ -309,10 +270,7 @@ export interface AuthtypesPostableRotateTokenDTO {
}
export interface AuthtypesResourceDTO {
/**
* @type string
*/
name: string;
name: AuthtypesNameDTO;
/**
* @type string
*/
@@ -362,6 +320,10 @@ export interface AuthtypesSamlConfigDTO {
samlIdp?: string;
}
export interface AuthtypesSelectorDTO {
[key: string]: unknown;
}
export interface AuthtypesSessionContextDTO {
/**
* @type boolean
@@ -375,6 +337,10 @@ export interface AuthtypesSessionContextDTO {
}
export interface AuthtypesTransactionDTO {
/**
* @type string
*/
id?: string;
object: AuthtypesObjectDTO;
/**
* @type string
@@ -657,14 +623,14 @@ export interface GatewaytypesLimitMetricValueDTO {
export interface GatewaytypesLimitValueDTO {
/**
* @type integer
* @nullable true
* @format int64
*/
count?: number | null;
count?: number;
/**
* @type integer
* @nullable true
* @format int64
*/
size?: number | null;
size?: number;
}
export interface GatewaytypesPaginationDTO {
@@ -2026,6 +1992,39 @@ export interface RenderErrorResponseDTO {
status: string;
}
/**
* @nullable
*/
export type RoletypesGettableResourcesDTORelations = {
[key: string]: string[];
} | null;
export interface RoletypesGettableResourcesDTO {
/**
* @type object
* @nullable true
*/
relations: RoletypesGettableResourcesDTORelations;
/**
* @type array
* @nullable true
*/
resources: AuthtypesResourceDTO[] | null;
}
export interface RoletypesPatchableObjectsDTO {
/**
* @type array
* @nullable true
*/
additions: AuthtypesObjectDTO[] | null;
/**
* @type array
* @nullable true
*/
deletions: AuthtypesObjectDTO[] | null;
}
export interface RoletypesPatchableRoleDTO {
/**
* @type string
@@ -2057,7 +2056,7 @@ export interface RoletypesRoleDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type string
*/
@@ -2198,7 +2197,7 @@ export interface TypesGettableAPIKeyDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type integer
* @format int64
@@ -2251,7 +2250,7 @@ export interface TypesIdentifiableDTO {
/**
* @type string
*/
id: string;
id?: string;
}
export interface TypesInviteDTO {
@@ -2267,7 +2266,7 @@ export interface TypesInviteDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type string
*/
@@ -2312,7 +2311,7 @@ export interface TypesOrganizationDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type integer
* @minimum 0
@@ -2418,7 +2417,7 @@ export interface TypesResetPasswordTokenDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type string
*/
@@ -2442,7 +2441,7 @@ export interface TypesStorableAPIKeyDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type string
*/
@@ -2491,7 +2490,7 @@ export interface TypesUserDTO {
/**
* @type string
*/
id: string;
id?: string;
/**
* @type boolean
*/
@@ -2607,14 +2606,6 @@ export type AuthzCheck200 = {
status: string;
};
export type AuthzResources200 = {
data: AuthtypesGettableResourcesDTO;
/**
* @type string
*/
status: string;
};
export type ChangePasswordPathParameters = {
id: string;
};
@@ -3026,7 +3017,7 @@ export type GetObjects200 = {
/**
* @type array
*/
data: AuthtypesGettableObjectsDTO[];
data: AuthtypesObjectDTO[];
/**
* @type string
*/
@@ -3037,6 +3028,14 @@ export type PatchObjectsPathParameters = {
id: string;
relation: string;
};
export type GetResources200 = {
data: RoletypesGettableResourcesDTO;
/**
* @type string
*/
status: string;
};
export type ListUsers200 = {
/**
* @type array
@@ -3138,7 +3137,7 @@ export type GetIngestionKeys200 = {
status: string;
};
export type CreateIngestionKey201 = {
export type CreateIngestionKey200 = {
data: GatewaytypesGettableCreatedIngestionKeyDTO;
/**
* @type string

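Note: two regenerated-schema changes above are easy to misread: `id` moving from required to optional on several DTOs, and GatewaytypesLimitValueDTO trading `number | null` for a plain optional `number`. A small self-contained sketch of what that means for consumers (the interface names here are local stand-ins, not the generated types):

interface OldLimitValue {
	count?: number | null; // may be absent, or present but null
}
interface NewLimitValue {
	count?: number; // may only be absent
}

function describeLimit(limit: NewLimitValue): string {
	// `??` now only needs to cover undefined; with the old shape it also covered null.
	return `count=${limit.count ?? 'unset'}`;
}

console.log(describeLimit({})); // "count=unset"
console.log(describeLimit({ count: 5 })); // "count=5"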

@@ -1,5 +1,4 @@
import { YAxisCategoryNames } from '../constants';
import { UniversalYAxisUnit, YAxisCategory } from '../types';
import { UniversalYAxisUnit } from '../types';
import {
getUniversalNameFromMetricUnit,
mapMetricUnitToUniversalUnit,
@@ -42,29 +41,29 @@ describe('YAxisUnitSelector utils', () => {
describe('mergeCategories', () => {
it('merges categories correctly', () => {
const categories1: YAxisCategory[] = [
const categories1 = [
{
name: YAxisCategoryNames.Data,
name: 'Data',
units: [
{ name: 'bytes', id: UniversalYAxisUnit.BYTES },
{ name: 'kilobytes', id: UniversalYAxisUnit.KILOBYTES },
],
},
];
const categories2: YAxisCategory[] = [
const categories2 = [
{
name: YAxisCategoryNames.Data,
name: 'Data',
units: [{ name: 'bits', id: UniversalYAxisUnit.BITS }],
},
{
name: YAxisCategoryNames.Time,
name: 'Time',
units: [{ name: 'seconds', id: UniversalYAxisUnit.SECONDS }],
},
];
const mergedCategories = mergeCategories(categories1, categories2);
expect(mergedCategories).toEqual([
{
name: YAxisCategoryNames.Data,
name: 'Data',
units: [
{ name: 'bytes', id: UniversalYAxisUnit.BYTES },
{ name: 'kilobytes', id: UniversalYAxisUnit.KILOBYTES },
@@ -72,7 +71,7 @@ describe('YAxisUnitSelector utils', () => {
],
},
{
name: YAxisCategoryNames.Time,
name: 'Time',
units: [{ name: 'seconds', id: UniversalYAxisUnit.SECONDS }],
},
]);
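Note: the expectation above pins down the contract: categories with the same name have their unit lists concatenated in order, and categories unique to either side are appended. A minimal sketch consistent with that test (not the actual implementation under test):

type Category = { name: string; units: { name: string; id: string }[] };

// Merge by category name, concatenating unit lists; preserve first-seen order.
function mergeCategoriesSketch(a: Category[], b: Category[]): Category[] {
	const merged = a.map((c) => ({ ...c, units: [...c.units] }));
	b.forEach((cat) => {
		const existing = merged.find((m) => m.name === cat.name);
		if (existing) {
			existing.units.push(...cat.units);
		} else {
			merged.push({ ...cat, units: [...cat.units] });
		}
	});
	return merged;
}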


@@ -1,36 +1,5 @@
import { UnitFamilyConfig, UniversalYAxisUnit, YAxisUnit } from './types';
export enum YAxisCategoryNames {
Time = 'Time',
Data = 'Data',
DataRate = 'Data Rate',
Count = 'Count',
Operations = 'Operations',
Percentage = 'Percentage',
Boolean = 'Boolean',
None = 'None',
HashRate = 'Hash Rate',
Miscellaneous = 'Miscellaneous',
Acceleration = 'Acceleration',
Angular = 'Angular',
Area = 'Area',
Flops = 'FLOPs',
Concentration = 'Concentration',
Currency = 'Currency',
Datetime = 'Datetime',
PowerElectrical = 'Power/Electrical',
Flow = 'Flow',
Force = 'Force',
Mass = 'Mass',
Length = 'Length',
Pressure = 'Pressure',
Radiation = 'Radiation',
RotationSpeed = 'Rotation Speed',
Temperature = 'Temperature',
Velocity = 'Velocity',
Volume = 'Volume',
}
// Mapping of universal y-axis units to their AWS, UCUM, and OpenMetrics equivalents (if available)
export const UniversalYAxisUnitMappings: Partial<
Record<UniversalYAxisUnit, Set<YAxisUnit> | null>


@@ -1,11 +1,10 @@
import { Y_AXIS_UNIT_NAMES } from './constants';
import { YAxisCategoryNames } from './constants';
import { UniversalYAxisUnit, YAxisCategory } from './types';
// Base categories for the universal y-axis units
export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
{
name: YAxisCategoryNames.Time,
name: 'Time',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.SECONDS],
@@ -38,7 +37,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Data,
name: 'Data',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.BYTES],
@@ -155,7 +154,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.DataRate,
name: 'Data Rate',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.BYTES_SECOND],
@@ -296,7 +295,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Count,
name: 'Count',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.COUNT],
@@ -313,7 +312,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Operations,
name: 'Operations',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.OPS_SECOND],
@@ -354,7 +353,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Percentage,
name: 'Percentage',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.PERCENT],
@@ -367,7 +366,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Boolean,
name: 'Boolean',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.TRUE_FALSE],
@@ -383,7 +382,7 @@ export const BASE_Y_AXIS_CATEGORIES: YAxisCategory[] = [
export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
{
name: YAxisCategoryNames.Time,
name: 'Time',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.DURATION_MS],
@@ -420,7 +419,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.DataRate,
name: 'Data Rate',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.DATA_RATE_PACKETS_PER_SECOND],
@@ -429,7 +428,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Boolean,
name: 'Boolean',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.ON_OFF],
@@ -438,7 +437,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.None,
name: 'None',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.NONE],
@@ -447,7 +446,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.HashRate,
name: 'Hash Rate',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.HASH_RATE_HASHES_PER_SECOND],
@@ -480,7 +479,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Miscellaneous,
name: 'Miscellaneous',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.MISC_STRING],
@@ -521,7 +520,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Acceleration,
name: 'Acceleration',
units: [
{
name:
@@ -542,7 +541,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Angular,
name: 'Angular',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.ANGULAR_DEGREE],
@@ -567,7 +566,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Area,
name: 'Area',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.AREA_SQUARE_METERS],
@@ -584,7 +583,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Flops,
name: 'FLOPs',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.FLOPS_FLOPS],
@@ -621,7 +620,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Concentration,
name: 'Concentration',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.CONCENTRATION_PPM],
@@ -678,7 +677,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Currency,
name: 'Currency',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.CURRENCY_USD],
@@ -775,7 +774,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Datetime,
name: 'Datetime',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.DATETIME_ISO],
@@ -812,7 +811,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.PowerElectrical,
name: 'Power/Electrical',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.POWER_WATT],
@@ -969,7 +968,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Flow,
name: 'Flow',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.FLOW_GALLONS_PER_MINUTE],
@@ -1006,7 +1005,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Force,
name: 'Force',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.FORCE_NEWTON_METERS],
@@ -1027,7 +1026,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Mass,
name: 'Mass',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.MASS_MILLIGRAM],
@@ -1052,7 +1051,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Length,
name: 'Length',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.LENGTH_MILLIMETER],
@@ -1081,7 +1080,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Pressure,
name: 'Pressure',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.PRESSURE_MILLIBAR],
@@ -1118,7 +1117,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Radiation,
name: 'Radiation',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.RADIATION_BECQUEREL],
@@ -1175,7 +1174,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.RotationSpeed,
name: 'Rotation Speed',
units: [
{
name:
@@ -1201,7 +1200,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Temperature,
name: 'Temperature',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.TEMPERATURE_CELSIUS],
@@ -1218,7 +1217,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Velocity,
name: 'Velocity',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.VELOCITY_METERS_PER_SECOND],
@@ -1239,7 +1238,7 @@ export const ADDITIONAL_Y_AXIS_CATEGORIES: YAxisCategory[] = [
],
},
{
name: YAxisCategoryNames.Volume,
name: 'Volume',
units: [
{
name: Y_AXIS_UNIT_NAMES[UniversalYAxisUnit.VOLUME_MILLILITER],

View File

@@ -1,5 +1,3 @@
import { YAxisCategoryNames } from './constants';
export interface YAxisUnitSelectorProps {
value: string | undefined;
onChange: (value: UniversalYAxisUnit) => void;
@@ -671,7 +669,7 @@ export interface UnitFamilyConfig {
}
export interface YAxisCategory {
name: YAxisCategoryNames;
name: string;
units: {
name: string;
id: UniversalYAxisUnit;


@@ -172,51 +172,23 @@ function ExplorerOptions({
const { user } = useAppContext();
const handleConditionalQueryModification = useCallback(
// eslint-disable-next-line sonarjs/cognitive-complexity
(defaultQuery: Query | null): string => {
const queryToUse = defaultQuery || query;
if (!queryToUse) {
throw new Error('No query provided');
}
if (
queryToUse?.builder?.queryData?.[0]?.aggregateOperator !==
StringOperators.NOOP &&
sourcepage !== DataSource.LOGS
StringOperators.NOOP
) {
return JSON.stringify(queryToUse);
}
			// Convert NOOP to COUNT for alerts and strip orderBy for logs
			const modifiedQuery = cloneDeep(queryToUse);
			if (modifiedQuery && modifiedQuery.builder?.queryData) {
				modifiedQuery.builder.queryData = modifiedQuery.builder.queryData.map(
					(item) => {
						const updatedItem = { ...item };
						if (updatedItem.aggregateOperator === StringOperators.NOOP) {
							updatedItem.aggregateOperator = StringOperators.COUNT;
						}
						// Alerts do not support order by on logs explorer queries
						if (sourcepage === DataSource.LOGS && panelType === PANEL_TYPES.LIST) {
							updatedItem.orderBy = [];
						}
						return updatedItem;
					},
				);
			}
			try {
				return JSON.stringify(modifiedQuery);
			} catch (err) {
				throw new Error(
					'Failed to stringify modified query: ' +
						(err instanceof Error ? err.message : String(err)),
				);
			}
			// Modify aggregateOperator to count, as noop is not supported in alerts
			modifiedQuery.builder.queryData[0].aggregateOperator = StringOperators.COUNT;
			return JSON.stringify(modifiedQuery);
},
[panelType, query, sourcepage],
[query],
);
const onCreateAlertsHandler = useCallback(
@@ -785,9 +757,9 @@ function ExplorerOptions({
);
}, [
disabled,
query,
isOneChartPerQuery,
onCreateAlertsHandler,
query,
splitedQueries,
]);
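Note: for the simplified handler above, the observable behavior is just that a NOOP aggregate on the first query becomes COUNT before serialization. A toy sketch of that contract (string literals stand in for the StringOperators enum):

type BuilderQuery = { builder: { queryData: { aggregateOperator: string }[] } };

function toAlertQuerySketch(query: BuilderQuery): string {
	if (query.builder.queryData[0]?.aggregateOperator !== 'noop') {
		return JSON.stringify(query);
	}
	// Cheap deep clone so the caller's query is untouched.
	const modified: BuilderQuery = JSON.parse(JSON.stringify(query));
	modified.builder.queryData[0].aggregateOperator = 'count';
	return JSON.stringify(modified);
}

console.log(
	toAlertQuerySketch({ builder: { queryData: [{ aggregateOperator: 'noop' }] } }),
);
// {"builder":{"queryData":[{"aggregateOperator":"count"}]}}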


@@ -1,4 +1,3 @@
import { useHistory } from 'react-router-dom';
import { PANEL_TYPES } from 'constants/queryBuilder';
import { MOCK_QUERY } from 'container/QueryTable/Drilldown/__tests__/mockTableData';
import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
@@ -16,11 +15,6 @@ import { getExplorerToolBarVisibility } from '../utils';
// Mock dependencies
jest.mock('hooks/dashboard/useUpdateDashboard');
jest.mock('react-router-dom', () => ({
...jest.requireActual('react-router-dom'),
useHistory: jest.fn(),
}));
jest.mock('../utils', () => ({
getExplorerToolBarVisibility: jest.fn(),
generateRGBAFromHex: jest.fn(() => 'rgba(0, 0, 0, 0.08)'),
@@ -35,7 +29,6 @@ const mockGetExplorerToolBarVisibility = jest.mocked(
);
const mockUseUpdateDashboard = jest.mocked(useUpdateDashboard);
const mockUseHistory = jest.mocked(useHistory);
// Mock data
const TEST_QUERY_ID = 'test-query-id';
@@ -110,6 +103,7 @@ describe('ExplorerOptionWrapper', () => {
beforeEach(() => {
jest.clearAllMocks();
mockGetExplorerToolBarVisibility.mockReturnValue(true);
// Mock useUpdateDashboard to return a mutation object
mockUseUpdateDashboard.mockReturnValue(({
mutate: jest.fn(),
@@ -123,28 +117,6 @@ describe('ExplorerOptionWrapper', () => {
} as unknown) as ReturnType<typeof useUpdateDashboard>);
});
it('should navigate to alert creation page when "Create an Alert" is clicked in logs-explorer', async () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });
const mockPush = jest.fn();
mockUseHistory.mockReturnValue(({
push: mockPush,
} as unknown) as ReturnType<typeof useHistory>);
renderExplorerOptionWrapper({ sourcepage: DataSource.LOGS });
const createAlertButton = screen.getByRole('button', {
name: 'Create an Alert',
});
await user.click(createAlertButton);
expect(mockPush).toHaveBeenCalledTimes(1);
const calledWith = mockPush.mock.calls[0][0] as string;
const [path, search = ''] = calledWith.split('?');
expect(path).toBe('/alerts/new');
const params = new URLSearchParams(search);
expect(params.has('compositeQuery')).toBe(true);
});
describe('onExport functionality', () => {
it('should call onExport when New Dashboard button is clicked in export modal', async () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });


@@ -18,8 +18,8 @@ jest.mock('lib/query/createTableColumnsFromQuery', () => ({
jest.mock('container/NewWidget/utils', () => ({
unitOptions: jest.fn(() => [
{ value: 'none', label: 'None' },
{ value: '%', label: 'Percent (0 - 100)' },
{ value: 'ms', label: 'Milliseconds (ms)' },
{ value: 'percent', label: 'Percent' },
{ value: 'ms', label: 'Milliseconds' },
]),
}));
@@ -39,7 +39,7 @@ const defaultProps = {
],
thresholdTableOptions: 'cpu_usage',
columnUnits: { cpu_usage: 'percent', memory_usage: 'bytes' },
yAxisUnit: '%',
yAxisUnit: 'percent',
moveThreshold: jest.fn(),
};


@@ -0,0 +1,99 @@
import { Dispatch, SetStateAction, useEffect, useState } from 'react';
import { AutoComplete, Input, Typography } from 'antd';
import { find } from 'lodash-es';
import { flattenedCategories } from './dataFormatCategories';
const findCategoryById = (
searchValue: string,
): Record<string, string> | undefined =>
find(flattenedCategories, (option) => option.id === searchValue);
const findCategoryByName = (
searchValue: string,
): Record<string, string> | undefined =>
find(flattenedCategories, (option) => option.name === searchValue);
type OnSelectType = Dispatch<SetStateAction<string>> | ((val: string) => void);
/**
* @deprecated Use DashboardYAxisUnitSelectorWrapper instead.
*/
function YAxisUnitSelector({
value,
onSelect,
fieldLabel,
handleClear,
}: {
value: string;
onSelect: OnSelectType;
fieldLabel: string;
handleClear?: () => void;
}): JSX.Element {
const [inputValue, setInputValue] = useState('');
// Sync input value with the actual value prop
useEffect(() => {
const category = findCategoryById(value);
setInputValue(category?.name || '');
}, [value]);
const onSelectHandler = (selectedValue: string): void => {
const category = findCategoryByName(selectedValue);
if (category) {
onSelect(category.id);
setInputValue(selectedValue);
}
};
const onChangeHandler = (inputValue: string): void => {
setInputValue(inputValue);
// Clear the yAxisUnit if input is empty or doesn't match any option
if (!inputValue) {
onSelect('');
}
};
const onClearHandler = (): void => {
setInputValue('');
onSelect('');
if (handleClear) {
handleClear();
}
};
const options = flattenedCategories.map((options) => ({
value: options.name,
}));
return (
<div className="y-axis-unit-selector">
<Typography.Text className="heading">{fieldLabel}</Typography.Text>
<AutoComplete
style={{ width: '100%' }}
rootClassName="y-axis-root-popover"
options={options}
allowClear
value={inputValue}
onChange={onChangeHandler}
onClear={onClearHandler}
onSelect={onSelectHandler}
filterOption={(inputValue, option): boolean => {
if (option) {
return (
option.value.toUpperCase().indexOf(inputValue.toUpperCase()) !== -1
);
}
return false;
}}
>
<Input placeholder="Unit" rootClassName="input" />
</AutoComplete>
</div>
);
}
export default YAxisUnitSelector;
YAxisUnitSelector.defaultProps = {
handleClear: (): void => {},
};
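Note: the component keeps its input text derived from the `value` prop — ids resolve to display names, and unknown or empty ids clear the field. A standalone sketch of that lookup contract, using the same mocked categories as the tests below:

type FlatCategory = { id: string; name: string };

const flattened: FlatCategory[] = [
	{ id: 'seconds', name: 'seconds (s)' },
	{ id: 'milliseconds', name: 'milliseconds (ms)' },
];

// Mirrors the useEffect sync above: unknown ids fall back to an empty input.
function displayNameFor(value: string): string {
	return flattened.find((o) => o.id === value)?.name ?? '';
}

console.log(displayNameFor('seconds')); // "seconds (s)"
console.log(displayNameFor('invalid_id')); // "" — matches the test expectation below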


@@ -0,0 +1,240 @@
/* eslint-disable sonarjs/no-duplicate-string */
import { act } from 'react-dom/test-utils';
import { render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import YAxisUnitSelector from '../YAxisUnitSelector';
// Mock the dataFormatCategories to have predictable test data
jest.mock('../dataFormatCategories', () => ({
flattenedCategories: [
{ id: 'seconds', name: 'seconds (s)' },
{ id: 'milliseconds', name: 'milliseconds (ms)' },
{ id: 'hours', name: 'hours (h)' },
{ id: 'minutes', name: 'minutes (m)' },
],
}));
const MOCK_SECONDS = 'seconds';
const MOCK_MILLISECONDS = 'milliseconds';
describe('YAxisUnitSelector', () => {
const defaultProps = {
value: MOCK_SECONDS,
onSelect: jest.fn(),
fieldLabel: 'Y Axis Unit',
handleClear: jest.fn(),
};
let user: ReturnType<typeof userEvent.setup>;
beforeEach(() => {
jest.clearAllMocks();
user = userEvent.setup();
});
afterEach(() => {
jest.restoreAllMocks();
});
describe('Rendering (Read) & (write)', () => {
it('renders with correct field label', () => {
render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
expect(screen.getByText('Y Axis Unit')).toBeInTheDocument();
const input = screen.getByRole('combobox');
expect(input).toHaveValue('seconds (s)');
});
it('renders with custom field label', () => {
render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel="Custom Unit Label"
handleClear={defaultProps.handleClear}
/>,
);
expect(screen.getByText('Custom Unit Label')).toBeInTheDocument();
});
it('displays empty input when value prop is empty', () => {
render(
<YAxisUnitSelector
value=""
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
expect(screen.getByDisplayValue('')).toBeInTheDocument();
});
it('shows placeholder text', () => {
render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
expect(screen.getByPlaceholderText('Unit')).toBeInTheDocument();
});
it('handles numeric input', async () => {
render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
const input = screen.getByRole('combobox');
await user.clear(input);
await user.type(input, '12345');
expect(input).toHaveValue('12345');
});
it('handles mixed content input', async () => {
render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
const input = screen.getByRole('combobox');
await user.clear(input);
await user.type(input, 'Test123!@#');
expect(input).toHaveValue('Test123!@#');
});
});
describe('State Management', () => {
it('syncs input value with value prop changes', async () => {
const { rerender } = render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
const input = screen.getByRole('combobox');
// Initial value
expect(input).toHaveValue('seconds (s)');
// Change value prop
rerender(
<YAxisUnitSelector
value={MOCK_MILLISECONDS}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
await waitFor(() => {
expect(input).toHaveValue('milliseconds (ms)');
});
});
it('handles empty value prop correctly', async () => {
const { rerender } = render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
const input = screen.getByRole('combobox');
// Change to empty value
rerender(
<YAxisUnitSelector
value=""
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
await waitFor(() => {
expect(input).toHaveValue('');
});
});
it('handles invalid value prop gracefully', async () => {
const { rerender } = render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
const input = screen.getByRole('combobox');
// Change to invalid value
rerender(
<YAxisUnitSelector
value="invalid_id"
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
await waitFor(() => {
expect(input).toHaveValue('');
});
});
it('maintains local state during typing', async () => {
render(
<YAxisUnitSelector
value={defaultProps.value}
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
const input = screen.getByRole('combobox');
// first clear then type
await user.clear(input);
await user.type(input, 'test');
expect(input).toHaveValue('test');
// Value prop change should not override local typing
await act(async () => {
// Simulate prop change
render(
<YAxisUnitSelector
value="bytes"
onSelect={defaultProps.onSelect}
fieldLabel={defaultProps.fieldLabel}
handleClear={defaultProps.handleClear}
/>,
);
});
// Local typing should be preserved
expect(input).toHaveValue('test');
});
});
});


@@ -1,53 +1,613 @@
import { flattenDeep } from 'lodash-es';
import {
	UniversalUnitToGrafanaUnit,
	YAxisCategoryNames,
} from 'components/YAxisUnitSelector/constants';
import { YAxisSource } from 'components/YAxisUnitSelector/types';
import { getYAxisCategories } from 'components/YAxisUnitSelector/utils';
import { convertValue } from 'lib/getConvertedValue';
import {
	AccelerationFormats,
	AngularFormats,
	AreaFormats,
	BooleanFormats,
	CategoryNames,
	ConcentrationFormats,
	CurrencyFormats,
	DataFormats,
	DataRateFormats,
	DataTypeCategories,
	DatetimeFormats,
	FlopsFormats,
	FlowFormats,
	ForceFormats,
	HashRateFormats,
	LengthFormats,
	MassFormats,
	MiscellaneousFormats,
	PowerElectricalFormats,
	PressureFormats,
	RadiationFormats,
	RotationSpeedFormats,
	TemperatureFormats,
	ThroughputFormats,
	TimeFormats,
	VelocityFormats,
	VolumeFormats,
} from './types';
export const dataTypeCategories: DataTypeCategories = [
{
name: CategoryNames.Time,
formats: [
{ name: 'Hertz (1/s)', id: TimeFormats.Hertz },
{ name: 'nanoseconds (ns)', id: TimeFormats.Nanoseconds },
{ name: 'microseconds (µs)', id: TimeFormats.Microseconds },
{ name: 'milliseconds (ms)', id: TimeFormats.Milliseconds },
{ name: 'seconds (s)', id: TimeFormats.Seconds },
{ name: 'minutes (m)', id: TimeFormats.Minutes },
{ name: 'hours (h)', id: TimeFormats.Hours },
{ name: 'days (d)', id: TimeFormats.Days },
{ name: 'duration in ms (dtdurationms)', id: TimeFormats.DurationMs },
{ name: 'duration in s (dtdurations)', id: TimeFormats.DurationS },
{ name: 'duration in h:m:s (dthms)', id: TimeFormats.DurationHms },
{ name: 'duration in d:h:m:s (dtdhms)', id: TimeFormats.DurationDhms },
{ name: 'timeticks (timeticks)', id: TimeFormats.Timeticks },
{ name: 'clock in ms (clockms)', id: TimeFormats.ClockMs },
{ name: 'clock in s (clocks)', id: TimeFormats.ClockS },
],
},
{
name: CategoryNames.Throughput,
formats: [
{ name: 'counts/sec (cps)', id: ThroughputFormats.CountsPerSec },
{ name: 'ops/sec (ops)', id: ThroughputFormats.OpsPerSec },
{ name: 'requests/sec (reqps)', id: ThroughputFormats.RequestsPerSec },
{ name: 'reads/sec (rps)', id: ThroughputFormats.ReadsPerSec },
{ name: 'writes/sec (wps)', id: ThroughputFormats.WritesPerSec },
{ name: 'I/O operations/sec (iops)', id: ThroughputFormats.IOOpsPerSec },
{ name: 'counts/min (cpm)', id: ThroughputFormats.CountsPerMin },
{ name: 'ops/min (opm)', id: ThroughputFormats.OpsPerMin },
{ name: 'reads/min (rpm)', id: ThroughputFormats.ReadsPerMin },
{ name: 'writes/min (wpm)', id: ThroughputFormats.WritesPerMin },
],
},
{
name: CategoryNames.Data,
formats: [
{ name: 'bytes(IEC)', id: DataFormats.BytesIEC },
{ name: 'bytes(SI)', id: DataFormats.BytesSI },
{ name: 'bits(IEC)', id: DataFormats.BitsIEC },
{ name: 'bits(SI)', id: DataFormats.BitsSI },
{ name: 'kibibytes', id: DataFormats.KibiBytes },
{ name: 'kilobytes', id: DataFormats.KiloBytes },
{ name: 'mebibytes', id: DataFormats.MebiBytes },
{ name: 'megabytes', id: DataFormats.MegaBytes },
{ name: 'gibibytes', id: DataFormats.GibiBytes },
{ name: 'gigabytes', id: DataFormats.GigaBytes },
{ name: 'tebibytes', id: DataFormats.TebiBytes },
{ name: 'terabytes', id: DataFormats.TeraBytes },
{ name: 'pebibytes', id: DataFormats.PebiBytes },
{ name: 'petabytes', id: DataFormats.PetaBytes },
],
},
{
name: CategoryNames.DataRate,
formats: [
{ name: 'packets/sec', id: DataRateFormats.PacketsPerSec },
{ name: 'bytes/sec(IEC)', id: DataRateFormats.BytesPerSecIEC },
{ name: 'bytes/sec(SI)', id: DataRateFormats.BytesPerSecSI },
{ name: 'bits/sec(IEC)', id: DataRateFormats.BitsPerSecIEC },
{ name: 'bits/sec(SI)', id: DataRateFormats.BitsPerSecSI },
{ name: 'kibibytes/sec', id: DataRateFormats.KibiBytesPerSec },
{ name: 'kibibits/sec', id: DataRateFormats.KibiBitsPerSec },
{ name: 'kilobytes/sec', id: DataRateFormats.KiloBytesPerSec },
{ name: 'kilobits/sec', id: DataRateFormats.KiloBitsPerSec },
{ name: 'mebibytes/sec', id: DataRateFormats.MebiBytesPerSec },
{ name: 'mebibits/sec', id: DataRateFormats.MebiBitsPerSec },
{ name: 'megabytes/sec', id: DataRateFormats.MegaBytesPerSec },
{ name: 'megabits/sec', id: DataRateFormats.MegaBitsPerSec },
{ name: 'gibibytes/sec', id: DataRateFormats.GibiBytesPerSec },
{ name: 'gibibits/sec', id: DataRateFormats.GibiBitsPerSec },
{ name: 'gigabytes/sec', id: DataRateFormats.GigaBytesPerSec },
{ name: 'gigabits/sec', id: DataRateFormats.GigaBitsPerSec },
{ name: 'tebibytes/sec', id: DataRateFormats.TebiBytesPerSec },
{ name: 'tebibits/sec', id: DataRateFormats.TebiBitsPerSec },
{ name: 'terabytes/sec', id: DataRateFormats.TeraBytesPerSec },
{ name: 'terabits/sec', id: DataRateFormats.TeraBitsPerSec },
{ name: 'pebibytes/sec', id: DataRateFormats.PebiBytesPerSec },
{ name: 'pebibits/sec', id: DataRateFormats.PebiBitsPerSec },
{ name: 'petabytes/sec', id: DataRateFormats.PetaBytesPerSec },
{ name: 'petabits/sec', id: DataRateFormats.PetaBitsPerSec },
],
},
{
name: CategoryNames.HashRate,
formats: [
{ name: 'hashes/sec', id: HashRateFormats.HashesPerSec },
{ name: 'kilohashes/sec', id: HashRateFormats.KiloHashesPerSec },
{ name: 'megahashes/sec', id: HashRateFormats.MegaHashesPerSec },
{ name: 'gigahashes/sec', id: HashRateFormats.GigaHashesPerSec },
{ name: 'terahashes/sec', id: HashRateFormats.TeraHashesPerSec },
{ name: 'petahashes/sec', id: HashRateFormats.PetaHashesPerSec },
{ name: 'exahashes/sec', id: HashRateFormats.ExaHashesPerSec },
],
},
{
name: CategoryNames.Miscellaneous,
formats: [
{ name: 'none', id: MiscellaneousFormats.None },
{ name: 'String', id: MiscellaneousFormats.String },
{ name: 'short', id: MiscellaneousFormats.Short },
{ name: 'Percent (0-100)', id: MiscellaneousFormats.Percent },
{ name: 'Percent (0.0-1.0)', id: MiscellaneousFormats.PercentUnit },
{ name: 'Humidity (%H)', id: MiscellaneousFormats.Humidity },
{ name: 'Decibel', id: MiscellaneousFormats.Decibel },
{ name: 'Hexadecimal (0x)', id: MiscellaneousFormats.Hexadecimal0x },
{ name: 'Hexadecimal', id: MiscellaneousFormats.Hexadecimal },
{ name: 'Scientific notation', id: MiscellaneousFormats.ScientificNotation },
{ name: 'Locale format', id: MiscellaneousFormats.LocaleFormat },
{ name: 'Pixels', id: MiscellaneousFormats.Pixels },
],
},
{
name: CategoryNames.Acceleration,
formats: [
{ name: 'Meters/sec²', id: AccelerationFormats.MetersPerSecondSquared },
{ name: 'Feet/sec²', id: AccelerationFormats.FeetPerSecondSquared },
{ name: 'G unit', id: AccelerationFormats.GUnit },
],
},
{
name: CategoryNames.Angle,
formats: [
{ name: 'Degrees (°)', id: AngularFormats.Degree },
{ name: 'Radians', id: AngularFormats.Radian },
{ name: 'Gradian', id: AngularFormats.Gradian },
{ name: 'Arc Minutes', id: AngularFormats.ArcMinute },
{ name: 'Arc Seconds', id: AngularFormats.ArcSecond },
],
},
{
name: CategoryNames.Area,
formats: [
{ name: 'Square Meters (m²)', id: AreaFormats.SquareMeters },
{ name: 'Square Feet (ft²)', id: AreaFormats.SquareFeet },
{ name: 'Square Miles (mi²)', id: AreaFormats.SquareMiles },
],
},
{
name: CategoryNames.Computation,
formats: [
{ name: 'FLOP/s', id: FlopsFormats.FLOPs },
{ name: 'MFLOP/s', id: FlopsFormats.MFLOPs },
{ name: 'GFLOP/s', id: FlopsFormats.GFLOPs },
{ name: 'TFLOP/s', id: FlopsFormats.TFLOPs },
{ name: 'PFLOP/s', id: FlopsFormats.PFLOPs },
{ name: 'EFLOP/s', id: FlopsFormats.EFLOPs },
{ name: 'ZFLOP/s', id: FlopsFormats.ZFLOPs },
{ name: 'YFLOP/s', id: FlopsFormats.YFLOPs },
],
},
{
name: CategoryNames.Concentration,
formats: [
{ name: 'parts-per-million (ppm)', id: ConcentrationFormats.PPM },
{ name: 'parts-per-billion (ppb)', id: ConcentrationFormats.PPB },
{ name: 'nanogram per cubic meter (ng/m³)', id: ConcentrationFormats.NgM3 },
{
name: 'nanogram per normal cubic meter (ng/Nm³)',
id: ConcentrationFormats.NgNM3,
},
{ name: 'microgram per cubic meter (μg/m³)', id: ConcentrationFormats.UgM3 },
{
name: 'microgram per normal cubic meter (μg/Nm³)',
id: ConcentrationFormats.UgNM3,
},
{ name: 'milligram per cubic meter (mg/m³)', id: ConcentrationFormats.MgM3 },
{
name: 'milligram per normal cubic meter (mg/Nm³)',
id: ConcentrationFormats.MgNM3,
},
{ name: 'gram per cubic meter (g/m³)', id: ConcentrationFormats.GM3 },
{
name: 'gram per normal cubic meter (g/Nm³)',
id: ConcentrationFormats.GNM3,
},
{ name: 'milligrams per decilitre (mg/dL)', id: ConcentrationFormats.MgDL },
{ name: 'millimoles per litre (mmol/L)', id: ConcentrationFormats.MmolL },
],
},
{
name: CategoryNames.Currency,
formats: [
{ name: 'Dollars ($)', id: CurrencyFormats.USD },
{ name: 'Pounds (£)', id: CurrencyFormats.GBP },
{ name: 'Euro (€)', id: CurrencyFormats.EUR },
{ name: 'Yen (¥)', id: CurrencyFormats.JPY },
{ name: 'Rubles (₽)', id: CurrencyFormats.RUB },
{ name: 'Hryvnias (₴)', id: CurrencyFormats.UAH },
{ name: 'Real (R$)', id: CurrencyFormats.BRL },
{ name: 'Danish Krone (kr)', id: CurrencyFormats.DKK },
{ name: 'Icelandic Króna (kr)', id: CurrencyFormats.ISK },
{ name: 'Norwegian Krone (kr)', id: CurrencyFormats.NOK },
{ name: 'Swedish Krona (kr)', id: CurrencyFormats.SEK },
{ name: 'Czech koruna (czk)', id: CurrencyFormats.CZK },
{ name: 'Swiss franc (CHF)', id: CurrencyFormats.CHF },
{ name: 'Polish Złoty (PLN)', id: CurrencyFormats.PLN },
{ name: 'Bitcoin (฿)', id: CurrencyFormats.BTC },
{ name: 'Milli Bitcoin (฿)', id: CurrencyFormats.MBTC },
{ name: 'Micro Bitcoin (฿)', id: CurrencyFormats.UBTC },
{ name: 'South African Rand (R)', id: CurrencyFormats.ZAR },
{ name: 'Indian Rupee (₹)', id: CurrencyFormats.INR },
{ name: 'South Korean Won (₩)', id: CurrencyFormats.KRW },
{ name: 'Indonesian Rupiah (Rp)', id: CurrencyFormats.IDR },
{ name: 'Philippine Peso (PHP)', id: CurrencyFormats.PHP },
{ name: 'Vietnamese Dong (VND)', id: CurrencyFormats.VND },
],
},
{
name: CategoryNames.Datetime,
formats: [
{ name: 'Datetime ISO', id: DatetimeFormats.ISO },
{
name: 'Datetime ISO (No date if today)',
id: DatetimeFormats.ISONoDateIfToday,
},
{ name: 'Datetime US', id: DatetimeFormats.US },
{
name: 'Datetime US (No date if today)',
id: DatetimeFormats.USNoDateIfToday,
},
{ name: 'Datetime local', id: DatetimeFormats.Local },
{
name: 'Datetime local (No date if today)',
id: DatetimeFormats.LocalNoDateIfToday,
},
{ name: 'Datetime default', id: DatetimeFormats.System },
{ name: 'From Now', id: DatetimeFormats.FromNow },
],
},
{
name: CategoryNames.Energy,
formats: [
{ name: 'Watt (W)', id: PowerElectricalFormats.WATT },
{ name: 'Kilowatt (kW)', id: PowerElectricalFormats.KWATT },
{ name: 'Megawatt (MW)', id: PowerElectricalFormats.MEGWATT },
{ name: 'Gigawatt (GW)', id: PowerElectricalFormats.GWATT },
{ name: 'Milliwatt (mW)', id: PowerElectricalFormats.MWATT },
{ name: 'Watt per square meter (W/m²)', id: PowerElectricalFormats.WM2 },
{ name: 'Volt-Ampere (VA)', id: PowerElectricalFormats.VOLTAMP },
{ name: 'Kilovolt-Ampere (kVA)', id: PowerElectricalFormats.KVOLTAMP },
{
name: 'Volt-Ampere reactive (VAr)',
id: PowerElectricalFormats.VOLTAMPREACT,
},
{
name: 'Kilovolt-Ampere reactive (kVAr)',
id: PowerElectricalFormats.KVOLTAMPREACT,
},
{ name: 'Watt-hour (Wh)', id: PowerElectricalFormats.WATTH },
{
name: 'Watt-hour per Kilogram (Wh/kg)',
id: PowerElectricalFormats.WATTHPERKG,
},
{ name: 'Kilowatt-hour (kWh)', id: PowerElectricalFormats.KWATTH },
{ name: 'Kilowatt-min (kWm)', id: PowerElectricalFormats.KWATTM },
{ name: 'Ampere-hour (Ah)', id: PowerElectricalFormats.AMPH },
{ name: 'Kiloampere-hour (kAh)', id: PowerElectricalFormats.KAMPH },
{ name: 'Milliampere-hour (mAh)', id: PowerElectricalFormats.MAMPH },
{ name: 'Joule (J)', id: PowerElectricalFormats.JOULE },
{ name: 'Electron volt (eV)', id: PowerElectricalFormats.EV },
{ name: 'Ampere (A)', id: PowerElectricalFormats.AMP },
{ name: 'Kiloampere (kA)', id: PowerElectricalFormats.KAMP },
{ name: 'Milliampere (mA)', id: PowerElectricalFormats.MAMP },
{ name: 'Volt (V)', id: PowerElectricalFormats.VOLT },
{ name: 'Kilovolt (kV)', id: PowerElectricalFormats.KVOLT },
{ name: 'Millivolt (mV)', id: PowerElectricalFormats.MVOLT },
{ name: 'Decibel-milliwatt (dBm)', id: PowerElectricalFormats.DBM },
{ name: 'Ohm (Ω)', id: PowerElectricalFormats.OHM },
{ name: 'Kiloohm (kΩ)', id: PowerElectricalFormats.KOHM },
{ name: 'Megaohm (MΩ)', id: PowerElectricalFormats.MOHM },
{ name: 'Farad (F)', id: PowerElectricalFormats.FARAD },
{ name: 'Microfarad (µF)', id: PowerElectricalFormats.µFARAD },
{ name: 'Nanofarad (nF)', id: PowerElectricalFormats.NFARAD },
{ name: 'Picofarad (pF)', id: PowerElectricalFormats.PFARAD },
{ name: 'Femtofarad (fF)', id: PowerElectricalFormats.FFARAD },
{ name: 'Henry (H)', id: PowerElectricalFormats.HENRY },
{ name: 'Millihenry (mH)', id: PowerElectricalFormats.MHENRY },
{ name: 'Microhenry (µH)', id: PowerElectricalFormats.µHENRY },
{ name: 'Lumens (Lm)', id: PowerElectricalFormats.LUMENS },
],
},
{
name: CategoryNames.Flow,
formats: [
{ name: 'Gallons/min (gpm)', id: FlowFormats.FLOWGPM },
{ name: 'Cubic meters/sec (cms)', id: FlowFormats.FLOWCMS },
{ name: 'Cubic feet/sec (cfs)', id: FlowFormats.FLOWCFS },
{ name: 'Cubic feet/min (cfm)', id: FlowFormats.FLOWCFM },
{ name: 'Litre/hour', id: FlowFormats.LITREH },
{ name: 'Litre/min (L/min)', id: FlowFormats.FLOWLPM },
{ name: 'milliLitre/min (mL/min)', id: FlowFormats.FLOWMLPM },
{ name: 'Lux (lx)', id: FlowFormats.LUX },
],
},
{
name: CategoryNames.Force,
formats: [
{ name: 'Newton-meters (Nm)', id: ForceFormats.FORCENM },
{ name: 'Kilonewton-meters (kNm)', id: ForceFormats.FORCEKNM },
{ name: 'Newtons (N)', id: ForceFormats.FORCEN },
{ name: 'Kilonewtons (kN)', id: ForceFormats.FORCEKN },
],
},
{
name: CategoryNames.Mass,
formats: [
{ name: 'milligram (mg)', id: MassFormats.MASSMG },
{ name: 'gram (g)', id: MassFormats.MASSG },
{ name: 'pound (lb)', id: MassFormats.MASSLB },
{ name: 'kilogram (kg)', id: MassFormats.MASSKG },
{ name: 'metric ton (t)', id: MassFormats.MASST },
],
},
{
name: CategoryNames.Length,
formats: [
{ name: 'millimeter (mm)', id: LengthFormats.LENGTHMM },
{ name: 'inch (in)', id: LengthFormats.LENGTHIN },
{ name: 'feet (ft)', id: LengthFormats.LENGTHFT },
{ name: 'meter (m)', id: LengthFormats.LENGTHM },
{ name: 'kilometer (km)', id: LengthFormats.LENGTHKM },
{ name: 'mile (mi)', id: LengthFormats.LENGTHMI },
],
},
{
name: CategoryNames.Pressure,
formats: [
{ name: 'Millibars', id: PressureFormats.PRESSUREMBAR },
{ name: 'Bars', id: PressureFormats.PRESSUREBAR },
{ name: 'Kilobars', id: PressureFormats.PRESSUREKBAR },
{ name: 'Pascals', id: PressureFormats.PRESSUREPA },
{ name: 'Hectopascals', id: PressureFormats.PRESSUREHPA },
{ name: 'Kilopascals', id: PressureFormats.PRESSUREKPA },
{ name: 'Inches of mercury', id: PressureFormats.PRESSUREHG },
{ name: 'PSI', id: PressureFormats.PRESSUREPSI },
],
},
{
name: CategoryNames.Radiation,
formats: [
{ name: 'Becquerel (Bq)', id: RadiationFormats.RADBQ },
{ name: 'curie (Ci)', id: RadiationFormats.RADCI },
{ name: 'Gray (Gy)', id: RadiationFormats.RADGY },
{ name: 'rad', id: RadiationFormats.RADRAD },
{ name: 'Sievert (Sv)', id: RadiationFormats.RADSV },
{ name: 'milliSievert (mSv)', id: RadiationFormats.RADMSV },
{ name: 'microSievert (µSv)', id: RadiationFormats.RADUSV },
{ name: 'rem', id: RadiationFormats.RADREM },
{ name: 'Exposure (C/kg)', id: RadiationFormats.RADEXPCKG },
{ name: 'roentgen (R)', id: RadiationFormats.RADR },
{ name: 'Sievert/hour (Sv/h)', id: RadiationFormats.RADSVH },
{ name: 'milliSievert/hour (mSv/h)', id: RadiationFormats.RADMSVH },
{ name: 'microSievert/hour (µSv/h)', id: RadiationFormats.RADUSVH },
],
},
{
name: CategoryNames.RotationSpeed,
formats: [
{ name: 'Revolutions per minute (rpm)', id: RotationSpeedFormats.ROTRPM },
{ name: 'Hertz (Hz)', id: RotationSpeedFormats.ROTHZ },
{ name: 'Radians per second (rad/s)', id: RotationSpeedFormats.ROTRADS },
{ name: 'Degrees per second (°/s)', id: RotationSpeedFormats.ROTDEGS },
],
},
{
name: CategoryNames.Temperature,
formats: [
{ name: 'Celsius (°C)', id: TemperatureFormats.CELSIUS },
{ name: 'Fahrenheit (°F)', id: TemperatureFormats.FAHRENHEIT },
{ name: 'Kelvin (K)', id: TemperatureFormats.KELVIN },
],
},
{
name: CategoryNames.Velocity,
formats: [
{ name: 'meters/second (m/s)', id: VelocityFormats.METERS_PER_SECOND },
{ name: 'kilometers/hour (km/h)', id: VelocityFormats.KILOMETERS_PER_HOUR },
{ name: 'miles/hour (mph)', id: VelocityFormats.MILES_PER_HOUR },
{ name: 'knot (kn)', id: VelocityFormats.KNOT },
],
},
{
name: CategoryNames.Volume,
formats: [
{ name: 'millilitre (mL)', id: VolumeFormats.MILLILITRE },
{ name: 'litre (L)', id: VolumeFormats.LITRE },
{ name: 'cubic meter', id: VolumeFormats.CUBIC_METER },
{ name: 'Normal cubic meter', id: VolumeFormats.NORMAL_CUBIC_METER },
{ name: 'cubic decimeter', id: VolumeFormats.CUBIC_DECIMETER },
{ name: 'gallons', id: VolumeFormats.GALLONS },
],
},
{
name: CategoryNames.Boolean,
formats: [
{ name: 'True / False', id: BooleanFormats.TRUE_FALSE },
{ name: 'Yes / No', id: BooleanFormats.YES_NO },
{ name: 'On / Off', id: BooleanFormats.ON_OFF },
],
},
];
// Function to get the category name for a given unit ID (Grafana or universal)
export const getCategoryName = (unitId: string): YAxisCategoryNames | null => {
	const categories = getYAxisCategories(YAxisSource.DASHBOARDS);
	const foundCategory = categories.find((category) =>
		category.units.some((unit) => {
			// Units in Y-axis categories use universal unit IDs.
			// Thresholds / column units often use Grafana-style IDs.
			// Treat a unit as matching if either:
			// - it is already the universal ID, or
			// - it matches the mapped Grafana ID for that universal unit.
			if (unit.id === unitId) {
				return true;
			}
			const grafanaId = UniversalUnitToGrafanaUnit[unit.id];
			return grafanaId === unitId;
		}),
	);
	return foundCategory ? foundCategory.name : null;
};
export const flattenedCategories = flattenDeep(
	dataTypeCategories.map((category) => category.formats),
);
type ConversionFactors = {
[key: string]: {
[key: string]: number | null;
};
};
// Object containing conversion factors for various categories and formats
const conversionFactors: ConversionFactors = {
[CategoryNames.Time]: {
[TimeFormats.Hertz]: 1,
[TimeFormats.Nanoseconds]: 1e-9,
[TimeFormats.Microseconds]: 1e-6,
[TimeFormats.Milliseconds]: 1e-3,
[TimeFormats.Seconds]: 1,
[TimeFormats.Minutes]: 60,
[TimeFormats.Hours]: 3600,
[TimeFormats.Days]: 86400,
[TimeFormats.DurationMs]: 1e-3,
[TimeFormats.DurationS]: 1,
[TimeFormats.DurationHms]: null, // Requires special handling
[TimeFormats.DurationDhms]: null, // Requires special handling
[TimeFormats.Timeticks]: null, // Requires special handling
[TimeFormats.ClockMs]: 1e-3,
[TimeFormats.ClockS]: 1,
},
[CategoryNames.Throughput]: {
[ThroughputFormats.CountsPerSec]: 1,
[ThroughputFormats.OpsPerSec]: 1,
[ThroughputFormats.RequestsPerSec]: 1,
[ThroughputFormats.ReadsPerSec]: 1,
[ThroughputFormats.WritesPerSec]: 1,
[ThroughputFormats.IOOpsPerSec]: 1,
[ThroughputFormats.CountsPerMin]: 1 / 60,
[ThroughputFormats.OpsPerMin]: 1 / 60,
[ThroughputFormats.ReadsPerMin]: 1 / 60,
[ThroughputFormats.WritesPerMin]: 1 / 60,
},
[CategoryNames.Data]: {
[DataFormats.BytesIEC]: 1,
[DataFormats.BytesSI]: 1,
[DataFormats.BitsIEC]: 0.125,
[DataFormats.BitsSI]: 0.125,
[DataFormats.KibiBytes]: 1024,
[DataFormats.KiloBytes]: 1000,
[DataFormats.MebiBytes]: 1048576,
[DataFormats.MegaBytes]: 1000000,
[DataFormats.GibiBytes]: 1073741824,
[DataFormats.GigaBytes]: 1000000000,
[DataFormats.TebiBytes]: 1099511627776,
[DataFormats.TeraBytes]: 1000000000000,
[DataFormats.PebiBytes]: 1125899906842624,
[DataFormats.PetaBytes]: 1000000000000000,
},
[CategoryNames.DataRate]: {
[DataRateFormats.PacketsPerSec]: null, // Cannot convert directly to other data rates
[DataRateFormats.BytesPerSecIEC]: 1,
[DataRateFormats.BytesPerSecSI]: 1,
[DataRateFormats.BitsPerSecIEC]: 0.125,
[DataRateFormats.BitsPerSecSI]: 0.125,
[DataRateFormats.KibiBytesPerSec]: 1024,
[DataRateFormats.KibiBitsPerSec]: 128,
[DataRateFormats.KiloBytesPerSec]: 1000,
[DataRateFormats.KiloBitsPerSec]: 125,
[DataRateFormats.MebiBytesPerSec]: 1048576,
[DataRateFormats.MebiBitsPerSec]: 131072,
[DataRateFormats.MegaBytesPerSec]: 1000000,
[DataRateFormats.MegaBitsPerSec]: 125000,
[DataRateFormats.GibiBytesPerSec]: 1073741824,
[DataRateFormats.GibiBitsPerSec]: 134217728,
[DataRateFormats.GigaBytesPerSec]: 1000000000,
[DataRateFormats.GigaBitsPerSec]: 125000000,
[DataRateFormats.TebiBytesPerSec]: 1099511627776,
[DataRateFormats.TebiBitsPerSec]: 137438953472,
[DataRateFormats.TeraBytesPerSec]: 1000000000000,
[DataRateFormats.TeraBitsPerSec]: 125000000000,
[DataRateFormats.PebiBytesPerSec]: 1125899906842624,
[DataRateFormats.PebiBitsPerSec]: 140737488355328,
[DataRateFormats.PetaBytesPerSec]: 1000000000000000,
[DataRateFormats.PetaBitsPerSec]: 125000000000000,
},
[CategoryNames.Miscellaneous]: {
[MiscellaneousFormats.None]: null,
[MiscellaneousFormats.String]: null,
[MiscellaneousFormats.Short]: null,
[MiscellaneousFormats.Percent]: 1,
[MiscellaneousFormats.PercentUnit]: 100,
[MiscellaneousFormats.Humidity]: 1,
[MiscellaneousFormats.Decibel]: null,
[MiscellaneousFormats.Hexadecimal0x]: null,
[MiscellaneousFormats.Hexadecimal]: null,
[MiscellaneousFormats.ScientificNotation]: null,
[MiscellaneousFormats.LocaleFormat]: null,
[MiscellaneousFormats.Pixels]: null,
},
[CategoryNames.Boolean]: {
[BooleanFormats.TRUE_FALSE]: null, // Not convertible
[BooleanFormats.YES_NO]: null, // Not convertible
[BooleanFormats.ON_OFF]: null, // Not convertible
},
};
// Function to get the conversion factor between two units in a specific category
function getConversionFactor(
fromUnit: string,
toUnit: string,
category: CategoryNames,
): number | null {
// Retrieves the conversion factors for the specified category
const categoryFactors = conversionFactors[category];
if (!categoryFactors) {
return null; // Returns null if the category does not exist
}
const fromFactor = categoryFactors[fromUnit];
const toFactor = categoryFactors[toUnit];
if (
fromFactor === undefined ||
toFactor === undefined ||
fromFactor === null ||
toFactor === null
) {
return null; // Returns null if either unit does not exist or is not convertible
}
return fromFactor / toFactor; // Returns the conversion factor ratio
}
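As a concrete check of the ratio rule above, here is a minimal, self-contained Go sketch of the same lookup; the unit IDs and the timeFactors table are hypothetical stand-ins for the TypeScript table, not SigNoz code.

package main

import "fmt"

// Factors relative to the category's base unit (seconds, for Time),
// mirroring the structure of the conversionFactors table above.
var timeFactors = map[string]float64{
	"ns": 1e-9, "ms": 1e-3, "s": 1, "m": 60, "h": 3600, "d": 86400,
}

// conversionFactor returns from/to: the multiplier that re-expresses a
// value given in fromUnit as a value in toUnit; ok is false for unknown units.
func conversionFactor(fromUnit, toUnit string) (factor float64, ok bool) {
	from, okFrom := timeFactors[fromUnit]
	to, okTo := timeFactors[toUnit]
	if !okFrom || !okTo {
		return 0, false
	}
	return from / to, true
}

func main() {
	// 5 minutes -> milliseconds: factor = 60 / 1e-3 = 60000, so 5 * 60000 = 300000.
	if f, ok := conversionFactor("m", "ms"); ok {
		fmt.Println(5 * f)
	}
}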
// Function to convert a value from one unit to another
export function convertUnit(
value: number,
fromUnitId?: string,
toUnitId?: string,
): number | null {
let fromUnit: string | undefined;
let toUnit: string | undefined;
// Finds the category that contains the specified units and extracts fromUnit and toUnit using array methods
const category = dataTypeCategories.find((category) =>
category.formats.some((format) => {
if (format.id === fromUnitId) {
fromUnit = format.id;
}
if (format.id === toUnitId) {
toUnit = format.id;
}
return fromUnit && toUnit; // Break out early if both units are found
}),
);
if (!category || !fromUnit || !toUnit) {
return null; // Return null if category or units are not found
}
// Gets the conversion factor for the specified units
const conversionFactor = getConversionFactor(
fromUnit,
toUnit,
category.name as any,
);
if (conversionFactor === null) {
return null; // Return null if conversion is not possible
}
return value * conversionFactor;
}
export function convertUnit(
value: number,
fromUnitId?: string,
toUnitId?: string,
): number | null {
if (!fromUnitId || !toUnitId) {
return null;
}
const fromCategory = getCategoryName(fromUnitId);
const toCategory = getCategoryName(toUnitId);
// If either unit is unknown or the categories don't match, the conversion is invalid
if (!fromCategory || !toCategory || fromCategory !== toCategory) {
return null;
}
// Delegate the actual numeric conversion (or identity) to the shared helper,
// which understands both Grafana-style and universal unit IDs.
return convertValue(value, fromUnitId, toUnitId);
}
// Function to get the category name for a given unit ID
export const getCategoryName = (unitId: string): CategoryNames | null => {
// Finds the category that contains the specified unit ID
const foundCategory = dataTypeCategories.find((category) =>
category.formats.some((format) => format.id === unitId),
);
return foundCategory ? (foundCategory.name as CategoryNames) : null;
};

View File

@@ -2,9 +2,6 @@ import { Layout } from 'react-grid-layout';
import { DefaultOptionType } from 'antd/es/select';
import { omitIdFromQuery } from 'components/ExplorerCard/utils';
import { PrecisionOptionsEnum } from 'components/Graph/types';
import { YAxisCategoryNames } from 'components/YAxisUnitSelector/constants';
import { YAxisSource } from 'components/YAxisUnitSelector/types';
import { getYAxisCategories } from 'components/YAxisUnitSelector/utils';
import {
initialQueryBuilderFormValuesMap,
PANEL_TYPES,
@@ -24,7 +21,11 @@ import { IBuilderQuery, Query } from 'types/api/queryBuilder/queryBuilderData';
import { EQueryType } from 'types/common/dashboard';
import { DataSource } from 'types/common/queryBuilder';
import { getCategoryName } from './RightContainer/dataFormatCategories';
import {
dataTypeCategories,
getCategoryName,
} from './RightContainer/dataFormatCategories';
import { CategoryNames } from './RightContainer/types';
export const getIsQueryModified = (
currentQuery: Query,
@@ -605,21 +606,14 @@ export const PANEL_TYPE_TO_QUERY_TYPES: Record<PANEL_TYPES, EQueryType[]> = {
* the label and value for each format.
*/
export const getCategorySelectOptionByName = (
name?: YAxisCategoryNames,
): DefaultOptionType[] => {
const categories = getYAxisCategories(YAxisSource.DASHBOARDS);
if (!categories.length) {
return [];
}
return (
categories
.find((category) => category.name === name)
?.units.map((unit) => ({
label: unit.name,
value: unit.id,
})) || []
);
};
name?: CategoryNames | string,
): DefaultOptionType[] =>
dataTypeCategories
.find((category) => category.name === name)
?.formats.map((format) => ({
label: format.name,
value: format.id,
})) || [];
/**
* Generates unit options based on the provided column unit.

View File

@@ -116,7 +116,7 @@ describe('SSOEnforcementToggle', () => {
render(
<SSOEnforcementToggle
isDefaultChecked={true}
record={{ ...mockGoogleAuthDomain, id: '' }}
record={{ ...mockGoogleAuthDomain, id: undefined }}
/>,
);

View File

@@ -1,13 +1,10 @@
import { YAxisCategoryNames } from 'components/YAxisUnitSelector/constants';
import { CategoryNames } from 'container/NewWidget/RightContainer/types';
export const categoryToSupport: YAxisCategoryNames[] = [
YAxisCategoryNames.None,
YAxisCategoryNames.Data,
YAxisCategoryNames.DataRate,
YAxisCategoryNames.Time,
YAxisCategoryNames.Count,
YAxisCategoryNames.Operations,
YAxisCategoryNames.Percentage,
YAxisCategoryNames.Miscellaneous,
YAxisCategoryNames.Boolean,
export const categoryToSupport = [
CategoryNames.Data,
CategoryNames.DataRate,
CategoryNames.Time,
CategoryNames.Throughput,
CategoryNames.Miscellaneous,
CategoryNames.Boolean,
];

View File

@@ -26,22 +26,5 @@ func (provider *provider) addAuthzRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v1/authz/resources", handler.New(provider.authZ.OpenAccess(provider.authzHandler.GetResources), handler.OpenAPIDef{
ID: "AuthzResources",
Tags: []string{"authz"},
Summary: "Get resources",
Description: "Gets all the available resources",
Request: nil,
RequestContentType: "",
Response: new(authtypes.GettableResources),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: nil,
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -55,7 +55,7 @@ func (provider *provider) addGatewayRoutes(router *mux.Router) error {
RequestContentType: "application/json",
Response: new(gatewaytypes.GettableCreatedIngestionKey),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusCreated,
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),

View File

@@ -81,7 +81,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
Response: new(metricsexplorertypes.MetricAttributesResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodGet).GetError(); err != nil {
@@ -138,7 +138,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
Response: new(metricsexplorertypes.MetricHighlightsResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodGet).GetError(); err != nil {
@@ -157,7 +157,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
Response: new(metricsexplorertypes.MetricAlertsResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodGet).GetError(); err != nil {
@@ -176,7 +176,7 @@ func (provider *provider) addMetricsExplorerRoutes(router *mux.Router) error {
Response: new(metricsexplorertypes.MetricDashboardsResponse),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusNotFound, http.StatusInternalServerError},
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodGet).GetError(); err != nil {

View File

@@ -45,6 +45,23 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v1/roles/resources", handler.New(provider.authZ.AdminAccess(provider.authzHandler.GetResources), handler.OpenAPIDef{
ID: "GetResources",
Tags: []string{"role"},
Summary: "Get resources",
Description: "Gets all the available resources for role assignment",
Request: nil,
RequestContentType: "",
Response: new(roletypes.GettableResources),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v1/roles/{id}", handler.New(provider.authZ.AdminAccess(provider.authzHandler.Get), handler.OpenAPIDef{
ID: "GetRole",
Tags: []string{"role"},
@@ -69,7 +86,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
Description: "Gets all objects connected to the specified role via a given relation type",
Request: nil,
RequestContentType: "",
Response: make([]*authtypes.GettableObjects, 0),
Response: make([]*authtypes.Object, 0),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusNotFound, http.StatusNotImplemented, http.StatusUnavailableForLegalReasons},
@@ -101,7 +118,7 @@ func (provider *provider) addRoleRoutes(router *mux.Router) error {
Tags: []string{"role"},
Summary: "Patch objects for a role by relation",
Description: "Patches the objects connected to the specified role via a given relation type",
Request: new(authtypes.PatchableObjects),
Request: new(roletypes.PatchableObjects),
RequestContentType: "",
Response: nil,
ResponseContentType: "application/json",

View File

@@ -190,7 +190,7 @@ func (provider *provider) GetOrCreate(_ context.Context, _ valuer.UUID, _ *rolet
}
func (provider *provider) GetResources(_ context.Context) []*authtypes.Resource {
return []*authtypes.Resource{}
return nil
}
func (provider *provider) GetObjects(ctx context.Context, orgID valuer.UUID, id valuer.UUID, relation authtypes.Relation) ([]*authtypes.Object, error) {

View File

@@ -110,13 +110,13 @@ func (handler *handler) GetObjects(rw http.ResponseWriter, r *http.Request) {
return
}
render.Success(rw, http.StatusOK, authtypes.NewGettableObjects(objects))
render.Success(rw, http.StatusOK, objects)
}
func (handler *handler) GetResources(rw http.ResponseWriter, r *http.Request) {
resources := handler.authz.GetResources(r.Context())
render.Success(rw, http.StatusOK, authtypes.NewGettableResources(resources))
render.Success(rw, http.StatusOK, roletypes.NewGettableResources(resources))
}
func (handler *handler) List(rw http.ResponseWriter, r *http.Request) {
@@ -197,30 +197,25 @@ func (handler *handler) PatchObjects(rw http.ResponseWriter, r *http.Request) {
return
}
req := new(roletypes.PatchableObjects)
if err := binding.JSON.BindBody(r.Body, req); err != nil {
render.Error(rw, err)
return
}
role, err := handler.authz.Get(ctx, valuer.MustNewUUID(claims.OrgID), id)
if err != nil {
render.Error(rw, err)
return
}
if err := role.ErrIfManaged(); err != nil {
render.Error(rw, err)
return
}
req := new(authtypes.PatchableObjects)
if err := binding.JSON.BindBody(r.Body, req); err != nil {
render.Error(rw, err)
return
}
additions, deletions, err := authtypes.NewPatchableObjects(req.Additions, req.Deletions, relation)
patchableObjects, err := role.NewPatchableObjects(req.Additions, req.Deletions, relation)
if err != nil {
render.Error(rw, err)
return
}
err = handler.authz.PatchObjects(ctx, valuer.MustNewUUID(claims.OrgID), role.Name, relation, additions, deletions)
err = handler.authz.PatchObjects(ctx, valuer.MustNewUUID(claims.OrgID), role.Name, relation, patchableObjects.Additions, patchableObjects.Deletions)
if err != nil {
render.Error(rw, err)
return

View File

@@ -3,7 +3,6 @@ package configflagger
import (
"context"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
@@ -33,10 +32,6 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
for name, value := range c.Config.Boolean {
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "boolean")
continue
}
return nil, err
}
@@ -51,10 +46,6 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
for name, value := range c.Config.String {
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "string")
continue
}
return nil, err
}
@@ -69,10 +60,6 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
for name, value := range c.Config.Float {
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "float")
continue
}
return nil, err
}
@@ -87,10 +74,6 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
for name, value := range c.Config.Integer {
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "integer")
continue
}
return nil, err
}
@@ -105,10 +88,6 @@ func New(ctx context.Context, ps factory.ProviderSettings, c flagger.Config, reg
for name, value := range c.Config.Object {
feature, _, err := registry.GetByString(name)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
settings.Logger().WarnContext(ctx, "skipping unknown feature flag", "name", name, "kind", "object")
continue
}
return nil, err
}

View File

@@ -5,7 +5,6 @@ import "github.com/SigNoz/signoz/pkg/types/featuretypes"
var (
FeatureUseSpanMetrics = featuretypes.MustNewName("use_span_metrics")
FeatureKafkaSpanEval = featuretypes.MustNewName("kafka_span_eval")
FeatureHideRootUser = featuretypes.MustNewName("hide_root_user")
)
func MustNewRegistry() featuretypes.Registry {
@@ -26,14 +25,6 @@ func MustNewRegistry() featuretypes.Registry {
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
&featuretypes.Feature{
Name: FeatureHideRootUser,
Kind: featuretypes.KindBoolean,
Stage: featuretypes.StageStable,
Description: "Controls whether root admin user is hidden or not",
DefaultVariant: featuretypes.MustNewName("disabled"),
Variants: featuretypes.NewBooleanVariants(),
},
)
if err != nil {
panic(err)

View File

@@ -122,7 +122,7 @@ func (handler *handler) CreateIngestionKey(rw http.ResponseWriter, r *http.Reque
return
}
render.Success(rw, http.StatusCreated, response)
render.Success(rw, http.StatusOK, response)
}
func (handler *handler) UpdateIngestionKey(rw http.ResponseWriter, r *http.Request) {

View File

@@ -0,0 +1,44 @@
package middleware
import (
"log/slog"
"net/http"
"runtime/debug"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/http/render"
)
// Recovery is a middleware that recovers from panics, logs the panic,
// and returns a 500 Internal Server Error.
type Recovery struct {
logger *slog.Logger
}
// NewRecovery creates a new Recovery middleware.
func NewRecovery(logger *slog.Logger) Wrapper {
return &Recovery{
logger: logger.With("pkg", "http-middleware-recovery"),
}
}
// Wrap is the middleware handler.
func (m *Recovery) Wrap(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
m.logger.ErrorContext(
r.Context(),
"panic recovered",
"err", err, "stack", string(debug.Stack()),
)
render.Error(w, errors.NewInternalf(
errors.CodeInternal, "internal server error",
))
}
}()
next.ServeHTTP(w, r)
})
}
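For context, the same recover-log-respond pattern can be exercised standalone with only the standard library; render.Error and the errors package above are SigNoz internals, so this sketch substitutes http.Error, and the route and port are made up.

package main

import (
	"log/slog"
	"net/http"
	"os"
	"runtime/debug"
)

// recovery converts a panic in any downstream handler into a logged
// 500 response, like the Recovery middleware above.
func recovery(logger *slog.Logger, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				logger.ErrorContext(r.Context(), "panic recovered",
					"err", err, "stack", string(debug.Stack()))
				http.Error(w, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	mux := http.NewServeMux()
	mux.HandleFunc("/boom", func(http.ResponseWriter, *http.Request) { panic("boom") })
	_ = http.ListenAndServe(":8080", recovery(logger, mux))
}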

View File

@@ -1,7 +1,6 @@
package implmetricsexplorer
import (
"context"
"net/http"
"github.com/SigNoz/signoz/pkg/errors"
@@ -188,12 +187,6 @@ func (h *handler) GetMetricAlerts(rw http.ResponseWriter, req *http.Request) {
}
orgID := valuer.MustNewUUID(claims.OrgID)
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
render.Error(rw, err)
return
}
out, err := h.module.GetMetricAlerts(req.Context(), orgID, metricName)
if err != nil {
render.Error(rw, err)
@@ -216,12 +209,6 @@ func (h *handler) GetMetricDashboards(rw http.ResponseWriter, req *http.Request)
}
orgID := valuer.MustNewUUID(claims.OrgID)
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
render.Error(rw, err)
return
}
out, err := h.module.GetMetricDashboards(req.Context(), orgID, metricName)
if err != nil {
render.Error(rw, err)
@@ -244,12 +231,6 @@ func (h *handler) GetMetricHighlights(rw http.ResponseWriter, req *http.Request)
}
orgID := valuer.MustNewUUID(claims.OrgID)
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
render.Error(rw, err)
return
}
highlights, err := h.module.GetMetricHighlights(req.Context(), orgID, metricName)
if err != nil {
render.Error(rw, err)
@@ -285,12 +266,6 @@ func (h *handler) GetMetricAttributes(rw http.ResponseWriter, req *http.Request)
}
orgID := valuer.MustNewUUID(claims.OrgID)
if err := h.checkMetricExists(req.Context(), orgID, metricName); err != nil {
render.Error(rw, err)
return
}
out, err := h.module.GetMetricAttributes(req.Context(), orgID, &in)
if err != nil {
render.Error(rw, err)
@@ -299,14 +274,3 @@ func (h *handler) GetMetricAttributes(rw http.ResponseWriter, req *http.Request)
render.Success(rw, http.StatusOK, out)
}
func (h *handler) checkMetricExists(ctx context.Context, orgID valuer.UUID, metricName string) error {
exists, err := h.module.CheckMetricExists(ctx, orgID, metricName)
if err != nil {
return err
}
if !exists {
return errors.NewNotFoundf(errors.CodeNotFound, "metric not found: %q", metricName)
}
return nil
}

View File

@@ -404,26 +404,6 @@ func (m *module) GetMetricAttributes(ctx context.Context, orgID valuer.UUID, req
}, nil
}
func (m *module) CheckMetricExists(ctx context.Context, orgID valuer.UUID, metricName string) (bool, error) {
sb := sqlbuilder.NewSelectBuilder()
sb.Select("count(*) > 0 as metricExists")
sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
sb.Where(sb.E("metric_name", metricName))
query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
db := m.telemetryStore.ClickhouseDB()
var exists bool
valueCtx := ctxtypes.SetClickhouseMaxThreads(ctx, m.config.TelemetryStore.Threads)
err := db.QueryRow(valueCtx, query, args...).Scan(&exists)
if err != nil {
return false, errors.WrapInternalf(err, errors.CodeInternal, "failed to check if metric exists")
}
return exists, nil
}
func (m *module) fetchMetadataFromCache(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string]*metricsexplorertypes.MetricMetadata, []string) {
hits := make(map[string]*metricsexplorertypes.MetricMetadata)
misses := make([]string, 0)

View File

@@ -23,7 +23,6 @@ type Handler interface {
// Module represents the metrics module interface.
type Module interface {
CheckMetricExists(ctx context.Context, orgID valuer.UUID, metricName string) (bool, error)
ListMetrics(ctx context.Context, orgID valuer.UUID, params *metricsexplorertypes.ListMetricsParams) (*metricsexplorertypes.ListMetricsResponse, error)
GetStats(ctx context.Context, orgID valuer.UUID, req *metricsexplorertypes.StatsRequest) (*metricsexplorertypes.StatsResponse, error)
GetTreemap(ctx context.Context, orgID valuer.UUID, req *metricsexplorertypes.TreemapRequest) (*metricsexplorertypes.TreemapResponse, error)

View File

@@ -120,8 +120,6 @@ func FilterResponse(results []*qbtypes.QueryRangeResponse) []*qbtypes.QueryRange
}
}
resultData.Rows = filteredRows
case *qbtypes.ScalarData:
resultData.Data = filterScalarDataIPs(resultData.Columns, resultData.Data)
}
filteredData = append(filteredData, result)
@@ -147,39 +145,6 @@ func shouldIncludeSeries(series *qbtypes.TimeSeries) bool {
return true
}
func filterScalarDataIPs(columns []*qbtypes.ColumnDescriptor, data [][]any) [][]any {
// Find column indices for server address fields
serverColIndices := make([]int, 0)
for i, col := range columns {
if col.Name == derivedKeyHTTPHost {
serverColIndices = append(serverColIndices, i)
}
}
if len(serverColIndices) == 0 {
return data
}
filtered := make([][]any, 0, len(data))
for _, row := range data {
includeRow := true
for _, colIdx := range serverColIndices {
if colIdx < len(row) {
if strVal, ok := row[colIdx].(string); ok {
if net.ParseIP(strVal) != nil {
includeRow = false
break
}
}
}
}
if includeRow {
filtered = append(filtered, row)
}
}
return filtered
}
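The removed filter hinges on net.ParseIP returning non-nil for IP literals; a tiny standalone demonstration of that check:

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.ParseIP returns a non-nil net.IP for IPv4 and IPv6 literals and
	// nil for hostnames, which is how the removed filter told them apart.
	for _, host := range []string{"192.168.1.1", "example.com", "::1"} {
		fmt.Println(host, net.ParseIP(host) != nil)
	}
}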
func shouldIncludeRow(row *qbtypes.RawRow) bool {
if row.Data != nil {
if domainVal, ok := row.Data[derivedKeyHTTPHost]; ok {

View File

@@ -117,59 +117,6 @@ func TestFilterResponse(t *testing.T) {
},
},
},
{
name: "should filter out IP addresses from scalar data",
input: []*qbtypes.QueryRangeResponse{
{
Data: qbtypes.QueryData{
Results: []any{
&qbtypes.ScalarData{
QueryName: "endpoints",
Columns: []*qbtypes.ColumnDescriptor{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
Type: qbtypes.ColumnTypeGroup,
},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
Type: qbtypes.ColumnTypeAggregation,
},
},
Data: [][]any{
{"192.168.1.1", 10},
{"example.com", 20},
{"10.0.0.1", 5},
},
},
},
},
},
},
expected: []*qbtypes.QueryRangeResponse{
{
Data: qbtypes.QueryData{
Results: []any{
&qbtypes.ScalarData{
QueryName: "endpoints",
Columns: []*qbtypes.ColumnDescriptor{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: derivedKeyHTTPHost},
Type: qbtypes.ColumnTypeGroup,
},
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "endpoints"},
Type: qbtypes.ColumnTypeAggregation,
},
},
Data: [][]any{
{"example.com", 20},
},
},
},
},
},
},
},
}
for _, tt := range tests {

View File

@@ -2,22 +2,18 @@ package impluser
import (
"context"
"slices"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/featuretypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type getter struct {
store types.UserStore
flagger flagger.Flagger
store types.UserStore
}
func NewGetter(store types.UserStore, flagger flagger.Flagger) user.Getter {
return &getter{store: store, flagger: flagger}
func NewGetter(store types.UserStore) user.Getter {
return &getter{store: store}
}
func (module *getter) GetRootUserByOrgID(ctx context.Context, orgID valuer.UUID) (*types.User, error) {
@@ -30,14 +26,6 @@ func (module *getter) ListByOrgID(ctx context.Context, orgID valuer.UUID) ([]*ty
return nil, err
}
// filter root users if feature flag `hide_root_users` is true
evalCtx := featuretypes.NewFlaggerEvaluationContext(orgID)
hideRootUsers := module.flagger.BooleanOrEmpty(ctx, flagger.FeatureHideRootUser, evalCtx)
if hideRootUsers {
users = slices.DeleteFunc(users, func(user *types.User) bool { return user.IsRoot })
}
return users, nil
}

View File

@@ -13,6 +13,7 @@ import (
root "github.com/SigNoz/signoz/pkg/modules/user"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
)
@@ -462,7 +463,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
if slices.Contains(integrationtypes.IntegrationUserEmails, createdByUser.Email) {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
return
}
@@ -507,7 +508,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
return
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
if slices.Contains(integrationtypes.IntegrationUserEmails, createdByUser.Email) {
render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
return
}

View File

@@ -19,6 +19,7 @@ import (
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/emailtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/dustin/go-humanize"
@@ -173,7 +174,7 @@ func (m *Module) DeleteInvite(ctx context.Context, orgID string, id valuer.UUID)
func (module *Module) CreateUser(ctx context.Context, input *types.User, opts ...root.CreateUserOption) error {
createUserOpts := root.NewCreateUserOptions(opts...)
// since assign is idempotant multiple calls to assign won't cause issues in case of retries.
// since assign is idempotent multiple calls to assign won't cause issues in case of retries.
err := module.authz.Grant(ctx, input.OrgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(input.Role), authtypes.MustNewSubject(authtypes.TypeableUser, input.ID.StringValue(), input.OrgID, nil))
if err != nil {
return err
@@ -279,7 +280,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return errors.WithAdditionalf(err, "cannot delete root user")
}
if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
if slices.Contains(integrationtypes.IntegrationUserEmails, user.Email) {
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
}
@@ -293,7 +294,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
return errors.New(errors.TypeForbidden, errors.CodeForbidden, "cannot delete the last admin")
}
// since revoke is idempotant multiple calls to revoke won't cause issues in case of retries
// since revoke is idempotent multiple calls to revoke won't cause issues in case of retries
err = module.authz.Revoke(ctx, orgID, roletypes.MustGetSigNozManagedRoleFromExistingRole(user.Role), authtypes.MustNewSubject(authtypes.TypeableUser, id, orgID, nil))
if err != nil {
return err

View File

@@ -0,0 +1,571 @@
package baseprovider
import (
"context"
"fmt"
"log/slog"
"sort"
"sync"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/query-service/utils"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
CodeDashboardNotFound = errors.MustNewCode("dashboard_not_found")
)
// hasValidTimeSeriesData checks if a query response contains valid time series data
// with at least one aggregation, series, and value
func hasValidTimeSeriesData(queryResponse *qbtypes.TimeSeriesData) bool {
return queryResponse != nil &&
len(queryResponse.Aggregations) > 0 &&
len(queryResponse.Aggregations[0].Series) > 0 &&
len(queryResponse.Aggregations[0].Series[0].Values) > 0
}
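The chained length checks matter because each level of the response can be empty; here is a self-contained sketch with stand-in types that mirror qbtypes.TimeSeriesData in shape only.

package main

import "fmt"

// Hypothetical stand-ins mirroring the nested response shape.
type value struct{ Timestamp int64 }
type series struct{ Values []value }
type aggregation struct{ Series []series }
type timeSeriesData struct{ Aggregations []aggregation }

// hasData short-circuits level by level, so no index is touched unless
// the slice above it is known to be non-empty.
func hasData(d *timeSeriesData) bool {
	return d != nil &&
		len(d.Aggregations) > 0 &&
		len(d.Aggregations[0].Series) > 0 &&
		len(d.Aggregations[0].Series[0].Values) > 0
}

func main() {
	fmt.Println(hasData(nil))               // false
	fmt.Println(hasData(&timeSeriesData{})) // false
	full := &timeSeriesData{Aggregations: []aggregation{
		{Series: []series{{Values: []value{{Timestamp: 1}}}}},
	}}
	fmt.Println(hasData(full)) // true
}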
type BaseCloudProvider[def integrationtypes.Definition, conf integrationtypes.CloudServiceConfig[def]] struct {
Logger *slog.Logger
Querier querier.Querier
AccountsRepo store.CloudProviderAccountsRepository
ServiceConfigRepo store.ServiceConfigDatabase
ServiceDefinitions *services.ServicesProvider[def]
ProviderType integrationtypes.CloudProviderType
}
func (b *BaseCloudProvider[def, conf]) GetName() integrationtypes.CloudProviderType {
return b.ProviderType
}
// AgentCheckIn is a helper function that handles common agent check-in logic.
// The getAgentConfigFunc should return the provider-specific agent configuration.
func AgentCheckIn[def integrationtypes.Definition, conf integrationtypes.CloudServiceConfig[def], AgentConfigT any](
b *BaseCloudProvider[def, conf],
ctx context.Context,
req *integrationtypes.PostableAgentCheckInPayload,
getAgentConfigFunc func(context.Context, *integrationtypes.CloudIntegration) (*AgentConfigT, error),
) (*integrationtypes.GettableAgentCheckIn[AgentConfigT], error) {
// agent can't check in unless the account is already created
existingAccount, err := b.AccountsRepo.Get(ctx, req.OrgID, b.GetName().String(), req.ID)
if err != nil {
return nil, err
}
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput,
"can't check in with new %s account id %s for account %s with existing %s id %s",
b.GetName().String(), req.AccountID, existingAccount.ID.StringValue(), b.GetName().String(),
*existingAccount.AccountID,
)
}
existingAccount, err = b.AccountsRepo.GetConnectedCloudAccount(ctx, req.OrgID, b.GetName().String(), req.AccountID)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return nil, err
}
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput,
"can't check in to %s account %s with id %s. already connected with id %s",
b.GetName().String(), req.AccountID, req.ID, existingAccount.ID.StringValue(),
)
}
agentReport := integrationtypes.AgentReport{
TimestampMillis: time.Now().UnixMilli(),
Data: req.Data,
}
account, err := b.AccountsRepo.Upsert(
ctx, req.OrgID, b.GetName().String(), &req.ID, nil, &req.AccountID, &agentReport, nil,
)
if err != nil {
return nil, err
}
agentConfig, err := getAgentConfigFunc(ctx, account)
if err != nil {
return nil, err
}
return &integrationtypes.GettableAgentCheckIn[AgentConfigT]{
AccountId: account.ID.StringValue(),
CloudAccountId: *account.AccountID,
RemovedAt: account.RemovedAt,
IntegrationConfig: *agentConfig,
}, nil
}
func (b *BaseCloudProvider[def, conf]) GetAccountStatus(ctx context.Context, orgID, accountID string) (*integrationtypes.GettableAccountStatus, error) {
accountRecord, err := b.AccountsRepo.Get(ctx, orgID, b.ProviderType.String(), accountID)
if err != nil {
return nil, err
}
return &integrationtypes.GettableAccountStatus{
Id: accountRecord.ID.String(),
CloudAccountId: accountRecord.AccountID,
Status: accountRecord.Status(),
}, nil
}
func (b *BaseCloudProvider[def, conf]) ListConnectedAccounts(ctx context.Context, orgID string) (*integrationtypes.GettableConnectedAccountsList, error) {
accountRecords, err := b.AccountsRepo.ListConnected(ctx, orgID, b.ProviderType.String())
if err != nil {
return nil, err
}
connectedAccounts := make([]*integrationtypes.Account, 0, len(accountRecords))
for _, r := range accountRecords {
connectedAccounts = append(connectedAccounts, r.Account(b.ProviderType))
}
return &integrationtypes.GettableConnectedAccountsList{
Accounts: connectedAccounts,
}, nil
}
func (b *BaseCloudProvider[def, conf]) DisconnectAccount(ctx context.Context, orgID, accountID string) (*integrationtypes.CloudIntegration, error) {
account, err := b.AccountsRepo.Get(ctx, orgID, b.ProviderType.String(), accountID)
if err != nil {
return nil, err
}
tsNow := time.Now()
account, err = b.AccountsRepo.Upsert(
ctx, orgID, b.ProviderType.String(), &accountID, nil, nil, nil, &tsNow,
)
if err != nil {
return nil, err
}
return account, nil
}
func (b *BaseCloudProvider[def, conf]) GetDashboard(ctx context.Context, req *integrationtypes.GettableDashboard) (*dashboardtypes.Dashboard, error) {
allDashboards, err := b.GetAvailableDashboards(ctx, req.OrgID)
if err != nil {
return nil, err
}
for _, d := range allDashboards {
if d.ID == req.ID {
return d, nil
}
}
return nil, errors.NewNotFoundf(CodeDashboardNotFound, "dashboard with id %s not found", req.ID)
}
func (b *BaseCloudProvider[def, conf]) GetServiceConnectionStatus(
ctx context.Context,
cloudAccountID string,
orgID valuer.UUID,
definition def,
isMetricsEnabled bool,
isLogsEnabled bool,
) (*integrationtypes.ServiceConnectionStatus, error) {
ingestionStatusCheck := definition.GetIngestionStatusCheck()
if ingestionStatusCheck == nil {
return nil, nil
}
resp := new(integrationtypes.ServiceConnectionStatus)
wg := sync.WaitGroup{}
if len(ingestionStatusCheck.Metrics) > 0 && isMetricsEnabled {
wg.Add(1)
go func() {
defer utils.RecoverPanic(func(err interface{}, stack []byte) {
b.Logger.ErrorContext(
ctx, "panic while getting service metrics connection status",
"service", definition.GetId(),
"error", err,
"stack", string(stack),
)
})
defer wg.Done()
status, _ := b.getServiceMetricsConnectionStatus(ctx, cloudAccountID, orgID, definition)
resp.Metrics = status
}()
}
if len(ingestionStatusCheck.Logs) > 0 && isLogsEnabled {
wg.Add(1)
go func() {
defer utils.RecoverPanic(func(err interface{}, stack []byte) {
b.Logger.ErrorContext(
ctx, "panic while getting service logs connection status",
"service", definition.GetId(),
"error", err,
"stack", string(stack),
)
})
defer wg.Done()
status, _ := b.getServiceLogsConnectionStatus(ctx, cloudAccountID, orgID, definition)
resp.Logs = status
}()
}
wg.Wait()
return resp, nil
}
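The fan-out above relies on three properties: each goroutine recovers its own panics, each writes to a distinct field of the response, and the WaitGroup joins both before returning. A minimal sketch of the same pattern, with made-up probe functions and status struct:

package main

import (
	"fmt"
	"sync"
)

type status struct{ Metrics, Logs string }

// checkBoth probes two signals concurrently; a panic in one probe is
// recovered locally so the other still completes, and each goroutine
// writes only its own field, so no mutex is required.
func checkBoth(probeMetrics, probeLogs func() string) status {
	var st status
	var wg sync.WaitGroup
	launch := func(dst *string, probe func() string) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() {
				if r := recover(); r != nil {
					fmt.Println("panic recovered:", r)
				}
			}()
			*dst = probe()
		}()
	}
	launch(&st.Metrics, probeMetrics)
	launch(&st.Logs, probeLogs)
	wg.Wait()
	return st
}

func main() {
	st := checkBoth(
		func() string { return "ok" },
		func() string { panic("logs backend unreachable") },
	)
	fmt.Printf("%+v\n", st) // {Metrics:ok Logs:}
}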
func (b *BaseCloudProvider[def, conf]) getServiceMetricsConnectionStatus(
ctx context.Context,
cloudAccountID string,
orgID valuer.UUID,
definition def,
) ([]*integrationtypes.SignalConnectionStatus, error) {
ingestionStatusCheck := definition.GetIngestionStatusCheck()
if ingestionStatusCheck == nil || len(ingestionStatusCheck.Metrics) < 1 {
return nil, nil
}
statusResp := make([]*integrationtypes.SignalConnectionStatus, 0)
for _, metric := range ingestionStatusCheck.Metrics {
statusResp = append(statusResp, &integrationtypes.SignalConnectionStatus{
CategoryID: metric.Category,
CategoryDisplayName: metric.DisplayName,
})
}
for index, category := range ingestionStatusCheck.Metrics {
queries := make([]qbtypes.QueryEnvelope, 0)
for _, check := range category.Checks {
// TODO: make sure all the cloud providers send these two attributes
// or create map of provider specific filter expression
filterExpression := fmt.Sprintf(`cloud.provider="%s" AND cloud.account.id="%s"`, b.ProviderType.String(), cloudAccountID)
f := ""
for _, attribute := range check.Attributes {
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
if attribute.Value != "" {
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
}
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
}
queries = append(queries, qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: valuer.GenerateUUID().String(),
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{{
MetricName: check.Key,
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
}},
Filter: &qbtypes.Filter{
Expression: filterExpression,
},
},
})
}
resp, err := b.Querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
SchemaVersion: "v5",
Start: uint64(time.Now().Add(-time.Hour).UnixMilli()),
End: uint64(time.Now().UnixMilli()),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
})
if err != nil {
b.Logger.DebugContext(ctx,
"error querying for service metrics connection status",
"error", err,
"service", definition.GetId(),
)
continue
}
if resp == nil || len(resp.Data.Results) < 1 {
continue
}
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
if !ok {
b.Logger.ErrorContext(ctx, "unexpected query response type for service metrics connection status",
"service", definition.GetId(),
)
continue
}
if !hasValidTimeSeriesData(queryResponse) {
continue
}
statusResp[index] = &integrationtypes.SignalConnectionStatus{
CategoryID: category.Category,
CategoryDisplayName: category.DisplayName,
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
LastReceivedFrom: fmt.Sprintf("signoz-%s-integration", b.ProviderType.String()),
}
}
return statusResp, nil
}
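The filter string assembled above grows one AND clause per check attribute; a standalone sketch of that assembly, where the attribute struct is a hypothetical stand-in for the definition's check attributes:

package main

import "fmt"

// attribute mirrors the shape used by the ingestion status checks:
// a name, an operator, and an optional value.
type attribute struct{ Name, Operator, Value string }

// buildFilter starts from the provider/account scope and ANDs each
// attribute clause onto it, quoting values the way the code above does.
func buildFilter(provider, accountID string, attrs []attribute) string {
	expr := fmt.Sprintf(`cloud.provider="%s" AND cloud.account.id="%s"`, provider, accountID)
	for _, a := range attrs {
		clause := fmt.Sprintf("%s %s", a.Name, a.Operator)
		if a.Value != "" {
			clause = fmt.Sprintf("%s '%s'", clause, a.Value)
		}
		expr = fmt.Sprintf("%s AND %s", expr, clause)
	}
	return expr
}

func main() {
	fmt.Println(buildFilter("aws", "123456789012", []attribute{
		{Name: "service.name", Operator: "=", Value: "rds"},
	}))
	// cloud.provider="aws" AND cloud.account.id="123456789012" AND service.name = 'rds'
}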
func (b *BaseCloudProvider[def, conf]) getServiceLogsConnectionStatus(
ctx context.Context,
cloudAccountID string,
orgID valuer.UUID,
definition def,
) ([]*integrationtypes.SignalConnectionStatus, error) {
ingestionStatusCheck := definition.GetIngestionStatusCheck()
if ingestionStatusCheck == nil || len(ingestionStatusCheck.Logs) < 1 {
return nil, nil
}
statusResp := make([]*integrationtypes.SignalConnectionStatus, 0)
for _, log := range ingestionStatusCheck.Logs {
statusResp = append(statusResp, &integrationtypes.SignalConnectionStatus{
CategoryID: log.Category,
CategoryDisplayName: log.DisplayName,
})
}
for index, category := range ingestionStatusCheck.Logs {
queries := make([]qbtypes.QueryEnvelope, 0)
for _, check := range category.Checks {
// TODO: make sure all the cloud providers send these two attributes
// or create map of provider specific filter expression
filterExpression := fmt.Sprintf(`cloud.account.id="%s"`, cloudAccountID)
f := ""
for _, attribute := range check.Attributes {
f = fmt.Sprintf("%s %s", attribute.Name, attribute.Operator)
if attribute.Value != "" {
f = fmt.Sprintf("%s '%s'", f, attribute.Value)
}
filterExpression = fmt.Sprintf("%s AND %s", filterExpression, f)
}
queries = append(queries, qbtypes.QueryEnvelope{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Name: valuer.GenerateUUID().String(),
Signal: telemetrytypes.SignalLogs,
Aggregations: []qbtypes.LogAggregation{{
Expression: "count()",
}},
Filter: &qbtypes.Filter{
Expression: filterExpression,
},
Limit: 10,
Offset: 0,
},
})
}
resp, err := b.Querier.QueryRange(ctx, orgID, &qbtypes.QueryRangeRequest{
SchemaVersion: "v1",
Start: uint64(time.Now().Add(-time.Hour * 1).UnixMilli()),
End: uint64(time.Now().UnixMilli()),
RequestType: qbtypes.RequestTypeTimeSeries,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
})
if err != nil {
b.Logger.DebugContext(ctx,
"error querying for service logs connection status",
"error", err,
"service", definition.GetId(),
)
continue
}
if resp == nil || len(resp.Data.Results) < 1 {
continue
}
queryResponse, ok := resp.Data.Results[0].(*qbtypes.TimeSeriesData)
if !ok {
b.Logger.ErrorContext(ctx, "unexpected query response type for service logs connection status",
"service", definition.GetId(),
)
continue
}
if !hasValidTimeSeriesData(queryResponse) {
continue
}
statusResp[index] = &integrationtypes.SignalConnectionStatus{
CategoryID: category.Category,
CategoryDisplayName: category.DisplayName,
LastReceivedTsMillis: queryResponse.Aggregations[0].Series[0].Values[0].Timestamp,
LastReceivedFrom: fmt.Sprintf("signoz-%s-integration", b.ProviderType.String()),
}
}
return statusResp, nil
}
func (b *BaseCloudProvider[def, conf]) GetAvailableDashboards(
ctx context.Context,
orgID valuer.UUID,
) ([]*dashboardtypes.Dashboard, error) {
accountRecords, err := b.AccountsRepo.ListConnected(ctx, orgID.StringValue(), b.ProviderType.String())
if err != nil {
return nil, err
}
servicesWithAvailableMetrics := map[string]*time.Time{}
for _, ar := range accountRecords {
if ar.AccountID != nil {
configsBySvcId, err := b.ServiceConfigRepo.GetAllForAccount(ctx, orgID.StringValue(), ar.ID.StringValue())
if err != nil {
return nil, err
}
for svcId, config := range configsBySvcId {
var serviceConfig conf
err = integrationtypes.UnmarshalJSON(config, &serviceConfig)
if err != nil {
return nil, err
}
if serviceConfig.IsMetricsEnabled() {
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
}
}
}
}
svcDashboards := make([]*dashboardtypes.Dashboard, 0)
allServices, err := b.ServiceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to list %s service definitions", b.ProviderType.String())
}
// accumulate definitions in a fixed order to ensure same order of dashboards across runs
svcIds := make([]string, 0, len(allServices))
for id := range allServices {
svcIds = append(svcIds, id)
}
sort.Strings(svcIds)
for _, svcId := range svcIds {
svc := allServices[svcId]
serviceDashboardsCreatedAt, ok := servicesWithAvailableMetrics[svcId]
if ok && serviceDashboardsCreatedAt != nil {
svcDashboards = append(
svcDashboards,
integrationtypes.GetDashboardsFromAssets(svc.GetId(), orgID, b.ProviderType, serviceDashboardsCreatedAt, svc.GetAssets())...,
)
servicesWithAvailableMetrics[svcId] = nil
}
}
return svcDashboards, nil
}
func (b *BaseCloudProvider[def, conf]) GetServiceConfig(
ctx context.Context,
definition def,
orgID valuer.UUID,
serviceId,
cloudAccountId string,
) (conf, error) {
var zero conf
activeAccount, err := b.AccountsRepo.GetConnectedCloudAccount(ctx, orgID.String(), b.ProviderType.String(), cloudAccountId)
if err != nil {
return zero, err
}
config, err := b.ServiceConfigRepo.Get(ctx, orgID.String(), activeAccount.ID.StringValue(), serviceId)
if err != nil {
if errors.Ast(err, errors.TypeNotFound) {
return zero, nil
}
return zero, err
}
var serviceConfig conf
err = integrationtypes.UnmarshalJSON(config, &serviceConfig)
if err != nil {
return zero, err
}
if config != nil && serviceConfig.IsMetricsEnabled() {
definition.PopulateDashboardURLs(b.ProviderType, serviceId)
}
return serviceConfig, nil
}
func (b *BaseCloudProvider[def, conf]) UpdateServiceConfig(ctx context.Context, req *integrationtypes.UpdatableServiceConfigReq) (any, error) {
definition, err := b.ServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, err
}
var updateReq integrationtypes.UpdatableCloudServiceConfig[conf]
err = integrationtypes.UnmarshalJSON(req.Config, &updateReq)
if err != nil {
return nil, err
}
// Check if config is provided (use any type assertion for nil check with generics)
if any(updateReq.Config) == nil {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "config is required")
}
if err = updateReq.Config.Validate(definition); err != nil {
return nil, err
}
// can only update config for a connected cloud account id
_, err = b.AccountsRepo.GetConnectedCloudAccount(
ctx, req.OrgID, b.GetName().String(), updateReq.CloudAccountId,
)
if err != nil {
return nil, err
}
serviceConfigBytes, err := integrationtypes.MarshalJSON(&updateReq.Config)
if err != nil {
return nil, err
}
updatedConfigBytes, err := b.ServiceConfigRepo.Upsert(
ctx, req.OrgID, b.GetName().String(), updateReq.CloudAccountId, req.ServiceId, serviceConfigBytes,
)
if err != nil {
return nil, err
}
var updatedConfig conf
err = integrationtypes.UnmarshalJSON(updatedConfigBytes, &updatedConfig)
if err != nil {
return nil, err
}
return &integrationtypes.PatchServiceConfigResponse{
ServiceId: req.ServiceId,
Config: updatedConfig,
}, nil
}
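The file above is the "generic base provider" the commit messages refer to: shared behavior lives on a type parameterized by the provider's definition type, and concrete providers embed it. A compact sketch of that embedding pattern, with made-up types:

package main

import "fmt"

// Definition is the minimal contract each cloud's service definition
// must satisfy (a stand-in for integrationtypes.Definition).
type Definition interface{ GetId() string }

// Base holds behavior shared by every cloud provider; the type
// parameter keeps provider-specific definitions strongly typed.
type Base[D Definition] struct{ Provider string }

func (b *Base[D]) Describe(d D) string {
	return fmt.Sprintf("%s service %s", b.Provider, d.GetId())
}

// awsDefinition is a hypothetical concrete definition type.
type awsDefinition struct{ id string }

func (a awsDefinition) GetId() string { return a.id }

// awsProvider embeds the instantiated base, inheriting the shared methods.
type awsProvider struct{ Base[awsDefinition] }

func main() {
	p := awsProvider{Base[awsDefinition]{Provider: "aws"}}
	fmt.Println(p.Describe(awsDefinition{id: "rds"})) // aws service rds
}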

View File

@@ -1,624 +0,0 @@
package cloudintegrations
import (
"context"
"fmt"
"net/url"
"slices"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/maps"
)
var SupportedCloudProviders = []string{
"aws",
}
func validateCloudProviderName(name string) *model.ApiError {
if !slices.Contains(SupportedCloudProviders, name) {
return model.BadRequest(fmt.Errorf("invalid cloud provider: %s", name))
}
return nil
}
type Controller struct {
accountsRepo cloudProviderAccountsRepository
serviceConfigRepo ServiceConfigDatabase
}
func NewController(sqlStore sqlstore.SQLStore) (*Controller, error) {
accountsRepo, err := newCloudProviderAccountsRepository(sqlStore)
if err != nil {
return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
}
serviceConfigRepo, err := newServiceConfigRepository(sqlStore)
if err != nil {
return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
}
return &Controller{
accountsRepo: accountsRepo,
serviceConfigRepo: serviceConfigRepo,
}, nil
}
type ConnectedAccountsListResponse struct {
Accounts []types.Account `json:"accounts"`
}
func (c *Controller) ListConnectedAccounts(ctx context.Context, orgId string, cloudProvider string) (
*ConnectedAccountsListResponse, *model.ApiError,
) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgId, cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
}
connectedAccounts := []types.Account{}
for _, a := range accountRecords {
connectedAccounts = append(connectedAccounts, a.Account())
}
return &ConnectedAccountsListResponse{
Accounts: connectedAccounts,
}, nil
}
type GenerateConnectionUrlRequest struct {
// Optional. To be specified for updates.
AccountId *string `json:"account_id,omitempty"`
AccountConfig types.AccountConfig `json:"account_config"`
AgentConfig SigNozAgentConfig `json:"agent_config"`
}
type SigNozAgentConfig struct {
// The region in which SigNoz agent should be installed.
Region string `json:"region"`
IngestionUrl string `json:"ingestion_url"`
IngestionKey string `json:"ingestion_key"`
SigNozAPIUrl string `json:"signoz_api_url"`
SigNozAPIKey string `json:"signoz_api_key"`
Version string `json:"version,omitempty"`
}
type GenerateConnectionUrlResponse struct {
AccountId string `json:"account_id"`
ConnectionUrl string `json:"connection_url"`
}
func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cloudProvider string, req GenerateConnectionUrlRequest) (*GenerateConnectionUrlResponse, *model.ApiError) {
// Account connection with a simple connection URL may not be available for all providers.
if cloudProvider != "aws" {
return nil, model.BadRequest(fmt.Errorf("unsupported cloud provider: %s", cloudProvider))
}
account, apiErr := c.accountsRepo.upsert(
ctx, orgId, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
agentVersion := "v0.0.8"
if req.AgentConfig.Version != "" {
agentVersion = req.AgentConfig.Version
}
connectionUrl := fmt.Sprintf(
"https://%s.console.aws.amazon.com/cloudformation/home?region=%s#/stacks/quickcreate?",
req.AgentConfig.Region, req.AgentConfig.Region,
)
for qp, value := range map[string]string{
"param_SigNozIntegrationAgentVersion": agentVersion,
"param_SigNozApiUrl": req.AgentConfig.SigNozAPIUrl,
"param_SigNozApiKey": req.AgentConfig.SigNozAPIKey,
"param_SigNozAccountId": account.ID.StringValue(),
"param_IngestionUrl": req.AgentConfig.IngestionUrl,
"param_IngestionKey": req.AgentConfig.IngestionKey,
"stackName": "signoz-integration",
"templateURL": fmt.Sprintf(
"https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json",
agentVersion,
),
} {
connectionUrl += fmt.Sprintf("&%s=%s", qp, url.QueryEscape(value))
}
return &GenerateConnectionUrlResponse{
AccountId: account.ID.StringValue(),
ConnectionUrl: connectionUrl,
}, nil
}
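The removed loop appends "&key=value" to a URL that already ends in "?", producing a harmless but untidy "?&" pair; with the standard library, url.Values handles escaping and separators. A sketch with made-up parameter values:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("stackName", "signoz-integration")
	params.Set("param_SigNozAccountId", "abc-123")
	// Encode sorts keys, escapes values, and joins with "&", so the base
	// URL needs no trailing separator of its own.
	base := "https://us-east-1.console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/quickcreate?"
	fmt.Println(base + params.Encode())
}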
type AccountStatusResponse struct {
Id string `json:"id"`
CloudAccountId *string `json:"cloud_account_id,omitempty"`
Status types.AccountStatus `json:"status"`
}
func (c *Controller) GetAccountStatus(ctx context.Context, orgId string, cloudProvider string, accountId string) (
*AccountStatusResponse, *model.ApiError,
) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
if apiErr != nil {
return nil, apiErr
}
resp := AccountStatusResponse{
Id: account.ID.StringValue(),
CloudAccountId: account.AccountID,
Status: account.Status(),
}
return &resp, nil
}
type AgentCheckInRequest struct {
ID string `json:"account_id"`
AccountID string `json:"cloud_account_id"`
// Arbitrary cloud specific Agent data
Data map[string]any `json:"data,omitempty"`
}
type AgentCheckInResponse struct {
AccountId string `json:"account_id"`
CloudAccountId string `json:"cloud_account_id"`
RemovedAt *time.Time `json:"removed_at"`
IntegrationConfig IntegrationConfigForAgent `json:"integration_config"`
}
type IntegrationConfigForAgent struct {
EnabledRegions []string `json:"enabled_regions"`
TelemetryCollectionStrategy *CompiledCollectionStrategy `json:"telemetry,omitempty"`
}
func (c *Controller) CheckInAsAgent(ctx context.Context, orgId string, cloudProvider string, req AgentCheckInRequest) (*AgentCheckInResponse, error) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in with new %s account id %s for account %s with existing %s id %s",
cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
))
}
existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
return nil, model.BadRequest(fmt.Errorf(
"can't check in to %s account %s with id %s. already connected with id %s",
cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
))
}
agentReport := types.AgentReport{
TimestampMillis: time.Now().UnixMilli(),
Data: req.Data,
}
account, apiErr := c.accountsRepo.upsert(
ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
// prepare and return integration config to be consumed by agent
compiledStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't init telemetry collection strategy: %w", err,
))
}
agentConfig := IntegrationConfigForAgent{
EnabledRegions: []string{},
TelemetryCollectionStrategy: compiledStrategy,
}
if account.Config != nil && account.Config.EnabledRegions != nil {
agentConfig.EnabledRegions = account.Config.EnabledRegions
}
services, err := services.Map(cloudProvider)
if err != nil {
return nil, err
}
svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
ctx, orgId, account.ID.StringValue(),
)
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, "couldn't get service configs for cloud account",
)
}
// accumulate config in a fixed order to ensure same config generated across runs
configuredServices := maps.Keys(svcConfigs)
slices.Sort(configuredServices)
for _, svcType := range configuredServices {
definition, ok := services[svcType]
if !ok {
continue
}
config := svcConfigs[svcType]
err := AddServiceStrategy(svcType, compiledStrategy, definition.Strategy, config)
if err != nil {
return nil, err
}
}
return &AgentCheckInResponse{
AccountId: account.ID.StringValue(),
CloudAccountId: *account.AccountID,
RemovedAt: account.RemovedAt,
IntegrationConfig: agentConfig,
}, nil
}
type UpdateAccountConfigRequest struct {
Config types.AccountConfig `json:"config"`
}
func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, cloudProvider string, accountId string, req UpdateAccountConfigRequest) (*types.Account, *model.ApiError) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
accountRecord, apiErr := c.accountsRepo.upsert(
ctx, orgId, cloudProvider, &accountId, &req.Config, nil, nil, nil,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
}
account := accountRecord.Account()
return &account, nil
}
func (c *Controller) DisconnectAccount(ctx context.Context, orgId string, cloudProvider string, accountId string) (*types.CloudIntegration, *model.ApiError) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
}
tsNow := time.Now()
account, apiErr = c.accountsRepo.upsert(
ctx, orgId, cloudProvider, &accountId, nil, nil, nil, &tsNow,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
}
return account, nil
}
type ListServicesResponse struct {
Services []ServiceSummary `json:"services"`
}
func (c *Controller) ListServices(
ctx context.Context,
orgID string,
cloudProvider string,
cloudAccountId *string,
) (*ListServicesResponse, *model.ApiError) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
definitions, apiErr := services.List(cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
}
svcConfigs := map[string]*types.CloudServiceConfig{}
if cloudAccountId != nil {
activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
ctx, orgID, cloudProvider, *cloudAccountId,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't get active account")
}
svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
ctx, orgID, activeAccount.ID.StringValue(),
)
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, "couldn't get service configs for cloud account",
)
}
}
summaries := []ServiceSummary{}
for _, def := range definitions {
summary := ServiceSummary{
Metadata: def.Metadata,
}
summary.Config = svcConfigs[summary.Id]
summaries = append(summaries, summary)
}
return &ListServicesResponse{
Services: summaries,
}, nil
}
func (c *Controller) GetServiceDetails(
ctx context.Context,
orgID string,
cloudProvider string,
serviceId string,
cloudAccountId *string,
) (*ServiceDetails, error) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
definition, err := services.GetServiceDefinition(cloudProvider, serviceId)
if err != nil {
return nil, err
}
details := ServiceDetails{
Definition: *definition,
}
if cloudAccountId != nil {
activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
ctx, orgID, cloudProvider, *cloudAccountId,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't get active account")
}
config, apiErr := c.serviceConfigRepo.get(
ctx, orgID, activeAccount.ID.StringValue(), serviceId,
)
if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
}
if config != nil {
details.Config = config
enabled := false
if config.Metrics != nil && config.Metrics.Enabled {
enabled = true
}
// add links to service dashboards, making them clickable.
for i, d := range definition.Assets.Dashboards {
dashboardUuid := c.dashboardUuid(
cloudProvider, serviceId, d.Id,
)
if enabled {
definition.Assets.Dashboards[i].Url = fmt.Sprintf("/dashboard/%s", dashboardUuid)
} else {
definition.Assets.Dashboards[i].Url = "" // to unset the in-memory URL if enabled once and disabled afterwards
}
}
}
}
return &details, nil
}
type UpdateServiceConfigRequest struct {
CloudAccountId string `json:"cloud_account_id"`
Config types.CloudServiceConfig `json:"config"`
}
func (u *UpdateServiceConfigRequest) Validate(def *services.Definition) error {
if def.Id != services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "s3 buckets can only be added to service-type[%s]", services.S3Sync)
} else if def.Id == services.S3Sync && u.Config.Logs != nil && u.Config.Logs.S3Buckets != nil {
for region := range u.Config.Logs.S3Buckets {
if _, found := ValidAWSRegions[region]; !found {
return errors.NewInvalidInputf(CodeInvalidCloudRegion, "invalid cloud region: %s", region)
}
}
}
return nil
}
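For illustration, here is a request shape that would pass this validation for the s3sync service type. This is a hedged sketch: the logs-config type name is assumed from its usage above, and the account id and bucket names are hypothetical.
req := &UpdateServiceConfigRequest{
CloudAccountId: "123456789012", // hypothetical AWS account id
Config: types.CloudServiceConfig{
Logs: &types.CloudServiceLogsConfig{ // type name assumed
Enabled: true,
// every bucket region must appear in ValidAWSRegions
S3Buckets: map[string][]string{"us-east-1": {"my-log-bucket"}},
},
},
}
// With S3 buckets present, req.Validate(def) succeeds only when
// def.Id == services.S3Sync; any other service type is rejected.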
type UpdateServiceConfigResponse struct {
Id string `json:"id"`
Config types.CloudServiceConfig `json:"config"`
}
func (c *Controller) UpdateServiceConfig(
ctx context.Context,
orgID string,
cloudProvider string,
serviceType string,
req *UpdateServiceConfigRequest,
) (*UpdateServiceConfigResponse, error) {
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
return nil, apiErr
}
// can only update config for a valid service.
definition, err := services.GetServiceDefinition(cloudProvider, serviceType)
if err != nil {
return nil, err
}
if err := req.Validate(definition); err != nil {
return nil, err
}
// can only update config for a connected cloud account id
_, apiErr := c.accountsRepo.getConnectedCloudAccount(
ctx, orgID, cloudProvider, req.CloudAccountId,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
}
updatedConfig, apiErr := c.serviceConfigRepo.upsert(
ctx, orgID, cloudProvider, req.CloudAccountId, serviceType, req.Config,
)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't update service config")
}
return &UpdateServiceConfigResponse{
Id: serviceType,
Config: *updatedConfig,
}, nil
}
// All dashboards that are available based on cloud integrations configuration
// across all cloud providers
func (c *Controller) AvailableDashboards(ctx context.Context, orgId valuer.UUID) ([]*dashboardtypes.Dashboard, *model.ApiError) {
allDashboards := []*dashboardtypes.Dashboard{}
for _, provider := range []string{"aws"} {
providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, provider)
if apiErr != nil {
return nil, model.WrapApiError(
apiErr, fmt.Sprintf("couldn't get available dashboards for %s", provider),
)
}
allDashboards = append(allDashboards, providerDashboards...)
}
return allDashboards, nil
}
func (c *Controller) AvailableDashboardsForCloudProvider(ctx context.Context, orgID valuer.UUID, cloudProvider string) ([]*dashboardtypes.Dashboard, *model.ApiError) {
accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgID.StringValue(), cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list connected cloud accounts")
}
// for v0, service dashboards are only available when metrics are enabled.
servicesWithAvailableMetrics := map[string]*time.Time{}
for _, ar := range accountRecords {
if ar.AccountID != nil {
configsBySvcId, apiErr := c.serviceConfigRepo.getAllForAccount(
ctx, orgID.StringValue(), ar.ID.StringValue(),
)
if apiErr != nil {
return nil, apiErr
}
for svcId, config := range configsBySvcId {
if config.Metrics != nil && config.Metrics.Enabled {
servicesWithAvailableMetrics[svcId] = &ar.CreatedAt
}
}
}
}
allServices, apiErr := services.List(cloudProvider)
if apiErr != nil {
return nil, apiErr
}
svcDashboards := []*dashboardtypes.Dashboard{}
for _, svc := range allServices {
serviceDashboardsCreatedAt := servicesWithAvailableMetrics[svc.Id]
if serviceDashboardsCreatedAt != nil {
for _, d := range svc.Assets.Dashboards {
author := fmt.Sprintf("%s-integration", cloudProvider)
svcDashboards = append(svcDashboards, &dashboardtypes.Dashboard{
ID: c.dashboardUuid(cloudProvider, svc.Id, d.Id),
Locked: true,
Data: *d.Definition,
TimeAuditable: types.TimeAuditable{
CreatedAt: *serviceDashboardsCreatedAt,
UpdatedAt: *serviceDashboardsCreatedAt,
},
UserAuditable: types.UserAuditable{
CreatedBy: author,
UpdatedBy: author,
},
OrgID: orgID,
})
}
servicesWithAvailableMetrics[svc.Id] = nil
}
}
return svcDashboards, nil
}
func (c *Controller) GetDashboardById(ctx context.Context, orgId valuer.UUID, dashboardUuid string) (*dashboardtypes.Dashboard, *model.ApiError) {
cloudProvider, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
if apiErr != nil {
return nil, apiErr
}
allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, cloudProvider)
if apiErr != nil {
return nil, model.WrapApiError(apiErr, "couldn't list available dashboards")
}
for _, d := range allDashboards {
if d.ID == dashboardUuid {
return d, nil
}
}
return nil, model.NotFoundError(fmt.Errorf("couldn't find dashboard with uuid: %s", dashboardUuid))
}
func (c *Controller) dashboardUuid(
cloudProvider string, svcId string, dashboardId string,
) string {
return fmt.Sprintf("cloud-integration--%s--%s--%s", cloudProvider, svcId, dashboardId)
}
func (c *Controller) parseDashboardUuid(dashboardUuid string) (cloudProvider string, svcId string, dashboardId string, apiErr *model.ApiError) {
parts := strings.SplitN(dashboardUuid, "--", 4)
if len(parts) != 4 || parts[0] != "cloud-integration" {
return "", "", "", model.BadRequest(fmt.Errorf("invalid cloud integration dashboard id"))
}
return parts[1], parts[2], parts[3], nil
}
func (c *Controller) IsCloudIntegrationDashboardUuid(dashboardUuid string) bool {
_, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
return apiErr == nil
}
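A quick round trip of the dashboard id scheme above, with hypothetical provider, service, and dashboard ids:
id := c.dashboardUuid("aws", "rds", "overview")
// id == "cloud-integration--aws--rds--overview"
provider, svcId, dashId, apiErr := c.parseDashboardUuid(id)
// provider == "aws", svcId == "rds", dashId == "overview", apiErr == nil
// Ids without the "cloud-integration" prefix, or without exactly four
// "--"-separated parts, are rejected as a bad request.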


@@ -0,0 +1,339 @@
package implawsprovider
import (
"context"
"fmt"
"log/slog"
"net/url"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/baseprovider"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/maps"
)
var (
CodeInvalidAWSRegion = errors.MustNewCode("invalid_aws_region")
)
type awsProvider struct {
baseprovider.BaseCloudProvider[*integrationtypes.AWSDefinition, *integrationtypes.AWSCloudServiceConfig]
}
func NewAWSCloudProvider(
logger *slog.Logger,
accountsRepo integrationstore.CloudProviderAccountsRepository,
serviceConfigRepo integrationstore.ServiceConfigDatabase,
querier querier.Querier,
) (integrationtypes.CloudProvider, error) {
serviceDefinitions, err := services.NewAWSCloudProviderServices()
if err != nil {
return nil, err
}
return &awsProvider{
BaseCloudProvider: baseprovider.BaseCloudProvider[*integrationtypes.AWSDefinition, *integrationtypes.AWSCloudServiceConfig]{
Logger: logger,
Querier: querier,
AccountsRepo: accountsRepo,
ServiceConfigRepo: serviceConfigRepo,
ServiceDefinitions: serviceDefinitions,
ProviderType: integrationtypes.CloudProviderAWS,
},
}, nil
}
func (a *awsProvider) AgentCheckIn(ctx context.Context, req *integrationtypes.PostableAgentCheckInPayload) (any, error) {
return baseprovider.AgentCheckIn(
&a.BaseCloudProvider,
ctx,
req,
a.getAWSAgentConfig,
)
}
func (a *awsProvider) getAWSAgentConfig(ctx context.Context, account *integrationtypes.CloudIntegration) (*integrationtypes.AWSAgentIntegrationConfig, error) {
// prepare and return integration config to be consumed by agent
agentConfig := &integrationtypes.AWSAgentIntegrationConfig{
EnabledRegions: []string{},
TelemetryCollectionStrategy: &integrationtypes.AWSCollectionStrategy{
Metrics: &integrationtypes.AWSMetricsStrategy{},
Logs: &integrationtypes.AWSLogsStrategy{},
S3Buckets: map[string][]string{},
},
}
accountConfig := new(integrationtypes.AWSAccountConfig)
err := integrationtypes.UnmarshalJSON([]byte(account.Config), accountConfig)
if err != nil {
return nil, err
}
if accountConfig.EnabledRegions != nil {
agentConfig.EnabledRegions = accountConfig.EnabledRegions
}
svcConfigs, err := a.ServiceConfigRepo.GetAllForAccount(
ctx, account.OrgID, account.ID.StringValue(),
)
if err != nil {
return nil, err
}
// accumulate config in a fixed order so the same config is generated across runs
configuredServices := maps.Keys(svcConfigs)
slices.Sort(configuredServices)
for _, svcType := range configuredServices {
definition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, svcType)
if err != nil {
continue
}
config := svcConfigs[svcType]
serviceConfig := new(integrationtypes.AWSCloudServiceConfig)
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
continue
}
if serviceConfig.IsLogsEnabled() {
if svcType == integrationtypes.S3Sync {
// s3 bucket sync: no cloudwatch log subscriptions are appended for this
// service type, though the definition carries a custom cloudwatch log group
// that is used to compute the logs connection status
agentConfig.TelemetryCollectionStrategy.S3Buckets = serviceConfig.Logs.S3Buckets
} else if definition.Strategy.Logs != nil { // services that include a logs subscription
agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions = append(
agentConfig.TelemetryCollectionStrategy.Logs.Subscriptions,
definition.Strategy.Logs.Subscriptions...,
)
}
}
if serviceConfig.IsMetricsEnabled() && definition.Strategy.Metrics != nil {
agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters = append(
agentConfig.TelemetryCollectionStrategy.Metrics.StreamFilters,
definition.Strategy.Metrics.StreamFilters...,
)
}
}
return agentConfig, nil
}
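To illustrate how the loop above accumulates per-service strategies, a hypothetical account with two enabled regions, at least one metrics-enabled service, and s3sync logs enabled would yield an agent config shaped roughly like this sketch (all values invented):
agentConfig := &integrationtypes.AWSAgentIntegrationConfig{
EnabledRegions: []string{"us-east-1", "eu-west-1"},
TelemetryCollectionStrategy: &integrationtypes.AWSCollectionStrategy{
// stream filters appended from every metrics-enabled service definition
Metrics: &integrationtypes.AWSMetricsStrategy{ /* ... */ },
// cloudwatch subscriptions appended from every logs-enabled service definition
Logs: &integrationtypes.AWSLogsStrategy{ /* ... */ },
// populated only by the s3sync service type
S3Buckets: map[string][]string{"us-east-1": {"my-log-bucket"}},
},
}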
func (a *awsProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
svcConfigs := make(map[string]*integrationtypes.AWSCloudServiceConfig)
if cloudAccountID != nil {
activeAccount, err := a.AccountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
if err != nil {
return nil, err
}
serviceConfigs, err := a.ServiceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.String())
if err != nil {
return nil, err
}
for svcType, config := range serviceConfigs {
serviceConfig := new(integrationtypes.AWSCloudServiceConfig)
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
svcConfigs[svcType] = serviceConfig
}
}
summaries := make([]integrationtypes.AWSServiceSummary, 0)
definitions, err := a.ServiceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, err
}
for _, def := range definitions {
summary := integrationtypes.AWSServiceSummary{
DefinitionMetadata: def.DefinitionMetadata,
Config: nil,
}
summary.Config = svcConfigs[summary.Id]
summaries = append(summaries, summary)
}
slices.SortFunc(summaries, func(a, b integrationtypes.AWSServiceSummary) int {
if a.DefinitionMetadata.Title < b.DefinitionMetadata.Title {
return -1
}
if a.DefinitionMetadata.Title > b.DefinitionMetadata.Title {
return 1
}
return 0
})
return &integrationtypes.GettableAWSServices{
Services: summaries,
}, nil
}
func (a *awsProvider) GetServiceDetails(ctx context.Context, req *integrationtypes.GetServiceDetailsReq) (any, error) {
details := new(integrationtypes.GettableAWSServiceDetails)
awsDefinition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, err
}
details.AWSDefinition = *awsDefinition
if req.CloudAccountID == nil {
return details, nil
}
config, err := a.GetServiceConfig(ctx, awsDefinition, req.OrgID, req.ServiceId, *req.CloudAccountID)
if err != nil {
return nil, err
}
if config == nil {
return details, nil
}
details.Config = config
isMetricsEnabled := config.IsMetricsEnabled()
isLogsEnabled := config.IsLogsEnabled()
connectionStatus, err := a.GetServiceConnectionStatus(
ctx,
*req.CloudAccountID,
req.OrgID,
awsDefinition,
isMetricsEnabled,
isLogsEnabled,
)
if err != nil {
return nil, err
}
details.ConnectionStatus = connectionStatus
return details, nil
}
func (a *awsProvider) GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
return a.BaseCloudProvider.GetAvailableDashboards(ctx, orgID)
}
func (a *awsProvider) GetDashboard(ctx context.Context, req *integrationtypes.GettableDashboard) (*dashboardtypes.Dashboard, error) {
return a.BaseCloudProvider.GetDashboard(ctx, req)
}
func (a *awsProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationtypes.PostableConnectionArtifact) (any, error) {
connection := new(integrationtypes.PostableAWSConnectionUrl)
err := integrationtypes.UnmarshalJSON(req.Data, connection)
if err != nil {
return nil, err
}
if connection.AccountConfig != nil {
for _, region := range connection.AccountConfig.EnabledRegions {
if integrationtypes.ValidAWSRegions[region] {
continue
}
return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
}
}
config, err := integrationtypes.MarshalJSON(connection.AccountConfig)
if err != nil {
return nil, err
}
account, err := a.AccountsRepo.Upsert(
ctx, req.OrgID, integrationtypes.CloudProviderAWS.String(), nil, config,
nil, nil, nil,
)
if err != nil {
return nil, err
}
agentVersion := "v0.0.8"
if connection.AgentConfig.Version != "" {
agentVersion = connection.AgentConfig.Version
}
baseURL := fmt.Sprintf("https://%s.console.aws.amazon.com/cloudformation/home",
connection.AgentConfig.Region)
u, _ := url.Parse(baseURL)
q := u.Query()
q.Set("region", connection.AgentConfig.Region)
u.Fragment = "/stacks/quickcreate"
u.RawQuery = q.Encode()
q = u.Query()
q.Set("stackName", "signoz-integration")
q.Set("templateURL", fmt.Sprintf("https://signoz-integrations.s3.us-east-1.amazonaws.com/aws-quickcreate-template-%s.json", agentVersion))
q.Set("param_SigNozIntegrationAgentVersion", agentVersion)
q.Set("param_SigNozApiUrl", connection.AgentConfig.SigNozAPIUrl)
q.Set("param_SigNozApiKey", connection.AgentConfig.SigNozAPIKey)
q.Set("param_SigNozAccountId", account.ID.StringValue())
q.Set("param_IngestionUrl", connection.AgentConfig.IngestionUrl)
q.Set("param_IngestionKey", connection.AgentConfig.IngestionKey)
return &integrationtypes.GettableAWSConnectionUrl{
AccountId: account.ID.StringValue(),
ConnectionUrl: u.String() + "?&" + q.Encode(), // this format is required by AWS
}, nil
}
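For a hypothetical region of us-east-1, u.String() places the region query before the #/stacks/quickcreate fragment, and the second parameter set is then appended after the fragment, so the generated URL comes out roughly as (parameters abbreviated):
https://us-east-1.console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/quickcreate?&param_SigNozAccountId=...&region=us-east-1&stackName=signoz-integration&templateURL=...
The CloudFormation quick-create console reads the parameters that follow the fragment, which is why the otherwise non-standard "?&" concatenation is kept.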
func (a *awsProvider) UpdateAccountConfig(ctx context.Context, req *integrationtypes.PatchableAccountConfig) (any, error) {
config := new(integrationtypes.PatchableAWSAccountConfig)
err := integrationtypes.UnmarshalJSON(req.Data, config)
if err != nil {
return nil, err
}
if config.Config == nil {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "account config can't be null")
}
for _, region := range config.Config.EnabledRegions {
if integrationtypes.ValidAWSRegions[region] {
continue
}
return nil, errors.NewInvalidInputf(CodeInvalidAWSRegion, "invalid aws region: %s", region)
}
configBytes, err := integrationtypes.MarshalJSON(config.Config)
if err != nil {
return nil, err
}
// account must exist to update config, but it doesn't need to be connected
_, err = a.AccountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.AccountId)
if err != nil {
return nil, err
}
accountRecord, err := a.AccountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), &req.AccountId, configBytes, nil, nil, nil,
)
if err != nil {
return nil, err
}
return accountRecord.Account(a.GetName()), nil
}


@@ -0,0 +1,376 @@
package implazureprovider
import (
"context"
"fmt"
"log/slog"
"slices"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/baseprovider"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"golang.org/x/exp/maps"
)
var (
CodeInvalidAzureRegion = errors.MustNewCode("invalid_azure_region")
)
type azureProvider struct {
baseprovider.BaseCloudProvider[*integrationtypes.AzureDefinition, *integrationtypes.AzureCloudServiceConfig]
}
func NewAzureCloudProvider(
logger *slog.Logger,
accountsRepo store.CloudProviderAccountsRepository,
serviceConfigRepo store.ServiceConfigDatabase,
querier querier.Querier,
) (integrationtypes.CloudProvider, error) {
azureServiceDefinitions, err := services.NewAzureCloudProviderServices()
if err != nil {
return nil, err
}
return &azureProvider{
BaseCloudProvider: baseprovider.BaseCloudProvider[*integrationtypes.AzureDefinition, *integrationtypes.AzureCloudServiceConfig]{
Logger: logger,
Querier: querier,
AccountsRepo: accountsRepo,
ServiceConfigRepo: serviceConfigRepo,
ServiceDefinitions: azureServiceDefinitions,
ProviderType: integrationtypes.CloudProviderAzure,
},
}, nil
}
func (a *azureProvider) AgentCheckIn(ctx context.Context, req *integrationtypes.PostableAgentCheckInPayload) (any, error) {
return baseprovider.AgentCheckIn(
&a.BaseCloudProvider,
ctx,
req,
a.getAzureAgentConfig,
)
}
func (a *azureProvider) getAzureAgentConfig(ctx context.Context, account *integrationtypes.CloudIntegration) (*integrationtypes.AzureAgentIntegrationConfig, error) {
// prepare and return integration config to be consumed by agent
agentConfig := &integrationtypes.AzureAgentIntegrationConfig{
TelemetryCollectionStrategy: make(map[string]*integrationtypes.AzureCollectionStrategy),
}
accountConfig := new(integrationtypes.AzureAccountConfig)
err := integrationtypes.UnmarshalJSON([]byte(account.Config), accountConfig)
if err != nil {
return nil, err
}
if account.Config != "" {
agentConfig.DeploymentRegion = accountConfig.DeploymentRegion
agentConfig.EnabledResourceGroups = accountConfig.EnabledResourceGroups
}
svcConfigs, err := a.ServiceConfigRepo.GetAllForAccount(
ctx, account.OrgID, account.ID.StringValue(),
)
if err != nil {
return nil, err
}
// accumulate config in a fixed order so the same config is generated across runs
configuredServices := maps.Keys(svcConfigs)
slices.Sort(configuredServices)
for _, svcType := range configuredServices {
definition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, svcType)
if err != nil {
continue
}
config := svcConfigs[svcType]
serviceConfig := new(integrationtypes.AzureCloudServiceConfig)
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
continue
}
metrics := make([]*integrationtypes.AzureMetricsStrategy, 0)
logs := make([]*integrationtypes.AzureLogsStrategy, 0)
metricsStrategyMap := make(map[string]*integrationtypes.AzureMetricsStrategy)
logsStrategyMap := make(map[string]*integrationtypes.AzureLogsStrategy)
if definition.Strategy != nil && definition.Strategy.Metrics != nil {
for _, metric := range definition.Strategy.Metrics {
metricsStrategyMap[metric.Name] = metric
}
}
if definition.Strategy != nil && definition.Strategy.Logs != nil {
for _, log := range definition.Strategy.Logs {
logsStrategyMap[log.Name] = log
}
}
if serviceConfig.Metrics != nil {
for _, metric := range serviceConfig.Metrics {
if !metric.Enabled {
continue
}
// skip metrics absent from the definition strategy; indexing the map
// directly would dereference a nil entry and panic
strat, ok := metricsStrategyMap[metric.Name]
if !ok {
continue
}
metrics = append(metrics, &integrationtypes.AzureMetricsStrategy{
CategoryType: strat.CategoryType,
Name: metric.Name,
})
}
}
if serviceConfig.Logs != nil {
for _, log := range serviceConfig.Logs {
if !log.Enabled {
continue
}
strat, ok := logsStrategyMap[log.Name]
if !ok {
continue
}
logs = append(logs, &integrationtypes.AzureLogsStrategy{
CategoryType: strat.CategoryType,
Name: log.Name,
})
}
}
strategy := &integrationtypes.AzureCollectionStrategy{
Metrics: metrics,
Logs: logs,
}
agentConfig.TelemetryCollectionStrategy[svcType] = strategy
}
return agentConfig, nil
}
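Mirroring the accumulation above, a hypothetical account with a single service (say blobstorage) that has one metric and one log category enabled would end up with an agent config shaped roughly like this sketch (values invented; category names taken from the blob storage definition further below):
agentConfig := &integrationtypes.AzureAgentIntegrationConfig{
DeploymentRegion: "westus2",
EnabledResourceGroups: []string{"signoz-integration-rg"},
TelemetryCollectionStrategy: map[string]*integrationtypes.AzureCollectionStrategy{
"blobstorage": {
Metrics: []*integrationtypes.AzureMetricsStrategy{{CategoryType: "metrics", Name: "Transaction"}},
Logs: []*integrationtypes.AzureLogsStrategy{{CategoryType: "logs", Name: "StorageRead"}},
},
},
}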
func (a *azureProvider) ListServices(ctx context.Context, orgID string, cloudAccountID *string) (any, error) {
svcConfigs := make(map[string]*integrationtypes.AzureCloudServiceConfig)
if cloudAccountID != nil {
activeAccount, err := a.AccountsRepo.GetConnectedCloudAccount(ctx, orgID, a.GetName().String(), *cloudAccountID)
if err != nil {
return nil, err
}
serviceConfigs, err := a.ServiceConfigRepo.GetAllForAccount(ctx, orgID, activeAccount.ID.StringValue())
if err != nil {
return nil, err
}
for svcType, config := range serviceConfigs {
serviceConfig := new(integrationtypes.AzureCloudServiceConfig)
err = integrationtypes.UnmarshalJSON(config, serviceConfig)
if err != nil {
return nil, err
}
svcConfigs[svcType] = serviceConfig
}
}
summaries := make([]integrationtypes.AzureServiceSummary, 0)
definitions, err := a.ServiceDefinitions.ListServiceDefinitions(ctx)
if err != nil {
return nil, err
}
for _, def := range definitions {
summary := integrationtypes.AzureServiceSummary{
DefinitionMetadata: def.DefinitionMetadata,
Config: nil,
}
summary.Config = svcConfigs[summary.Id]
summaries = append(summaries, summary)
}
slices.SortFunc(summaries, func(a, b integrationtypes.AzureServiceSummary) int {
if a.DefinitionMetadata.Title < b.DefinitionMetadata.Title {
return -1
}
if a.DefinitionMetadata.Title > b.DefinitionMetadata.Title {
return 1
}
return 0
})
return &integrationtypes.GettableAzureServices{
Services: summaries,
}, nil
}
func (a *azureProvider) GetServiceDetails(ctx context.Context, req *integrationtypes.GetServiceDetailsReq) (any, error) {
details := new(integrationtypes.GettableAzureServiceDetails)
azureDefinition, err := a.ServiceDefinitions.GetServiceDefinition(ctx, req.ServiceId)
if err != nil {
return nil, err
}
details.AzureDefinition = *azureDefinition
if req.CloudAccountID == nil {
return details, nil
}
config, err := a.GetServiceConfig(ctx, azureDefinition, req.OrgID, req.ServiceId, *req.CloudAccountID)
if err != nil {
return nil, err
}
details.Config = config
// fill default values for config
if details.Config == nil {
cfg := new(integrationtypes.AzureCloudServiceConfig)
logs := make([]*integrationtypes.AzureCloudServiceLogsConfig, 0)
if azureDefinition.Strategy != nil && azureDefinition.Strategy.Logs != nil {
for _, log := range azureDefinition.Strategy.Logs {
logs = append(logs, &integrationtypes.AzureCloudServiceLogsConfig{
Enabled: false,
Name: log.Name,
})
}
}
metrics := make([]*integrationtypes.AzureCloudServiceMetricsConfig, 0)
if azureDefinition.Strategy != nil && azureDefinition.Strategy.Metrics != nil {
for _, metric := range azureDefinition.Strategy.Metrics {
metrics = append(metrics, &integrationtypes.AzureCloudServiceMetricsConfig{
Enabled: false,
Name: metric.Name,
})
}
}
cfg.Logs = logs
cfg.Metrics = metrics
details.Config = cfg
}
isMetricsEnabled := details.Config != nil && details.Config.IsMetricsEnabled()
isLogsEnabled := details.Config != nil && details.Config.IsLogsEnabled()
connectionStatus, err := a.GetServiceConnectionStatus(
ctx,
*req.CloudAccountID,
req.OrgID,
azureDefinition,
isMetricsEnabled,
isLogsEnabled,
)
if err != nil {
return nil, err
}
details.ConnectionStatus = connectionStatus
return details, nil
}
func (a *azureProvider) GetAvailableDashboards(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
return a.BaseCloudProvider.GetAvailableDashboards(ctx, orgID)
}
func (a *azureProvider) GetDashboard(ctx context.Context, req *integrationtypes.GettableDashboard) (*dashboardtypes.Dashboard, error) {
return a.BaseCloudProvider.GetDashboard(ctx, req)
}
func (a *azureProvider) GenerateConnectionArtifact(ctx context.Context, req *integrationtypes.PostableConnectionArtifact) (any, error) {
connection := new(integrationtypes.PostableAzureConnectionCommand)
err := integrationtypes.UnmarshalJSON(req.Data, connection)
if err != nil {
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed unmarshal request data into AWS connection config")
}
// validate connection config; AccountConfig is dereferenced unconditionally
// below when building the CLI command, so reject a nil config here
if connection.AccountConfig == nil {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "account config must be provided")
}
if !integrationtypes.ValidAzureRegions[connection.AccountConfig.DeploymentRegion] {
return nil, errors.NewInvalidInputf(CodeInvalidAzureRegion, "invalid azure region: %s",
connection.AccountConfig.DeploymentRegion,
)
}
config, err := integrationtypes.MarshalJSON(connection.AccountConfig)
if err != nil {
return nil, err
}
account, err := a.AccountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), nil, config,
nil, nil, nil,
)
if err != nil {
return nil, err
}
agentVersion := "v0.0.8"
if connection.AgentConfig.Version != "" {
agentVersion = connection.AgentConfig.Version
}
// TODO: improve the command and set url
cliCommand := []string{"az", "stack", "sub", "create", "--name", "SigNozIntegration", "--location",
connection.AccountConfig.DeploymentRegion, "--template-uri", fmt.Sprintf("<url>%s", agentVersion),
"--action-on-unmanage", "deleteAll", "--deny-settings-mode", "denyDelete", "--parameters", fmt.Sprintf("rgName=%s", "signoz-integration-rg"),
fmt.Sprintf("rgLocation=%s", connection.AccountConfig.DeploymentRegion)}
return &integrationtypes.GettableAzureConnectionCommand{
AccountId: account.ID.String(),
AzureShellConnectionCommand: "az create",
AzureCliConnectionCommand: strings.Join(cliCommand, " "),
}, nil
}
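For a hypothetical deployment region of westus2 and the default agent version, the joined CLI command renders roughly as follows (the <url> template URI is the TODO noted above):
az stack sub create --name SigNozIntegration --location westus2 --template-uri <url>v0.0.8 --action-on-unmanage deleteAll --deny-settings-mode denyDelete --parameters rgName=signoz-integration-rg rgLocation=westus2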
func (a *azureProvider) UpdateAccountConfig(ctx context.Context, req *integrationtypes.PatchableAccountConfig) (any, error) {
config := new(integrationtypes.PatchableAzureAccountConfig)
err := integrationtypes.UnmarshalJSON(req.Data, config)
if err != nil {
return nil, err
}
if config.Config == nil && len(config.Config.EnabledResourceGroups) < 1 {
return nil, errors.NewInvalidInputf(CodeInvalidAzureRegion, "azure region and resource groups must be provided")
}
// for azure, preserve the deployment region if already set
account, err := a.AccountsRepo.Get(ctx, req.OrgID, a.GetName().String(), req.AccountId)
if err != nil {
return nil, err
}
storedConfig := new(integrationtypes.AzureAccountConfig)
err = integrationtypes.UnmarshalJSON([]byte(account.Config), storedConfig)
if err != nil {
return nil, err
}
if account.Config != "" {
config.Config.DeploymentRegion = storedConfig.DeploymentRegion
}
configBytes, err := integrationtypes.MarshalJSON(config.Config)
if err != nil {
return nil, err
}
accountRecord, err := a.AccountsRepo.Upsert(
ctx, req.OrgID, a.GetName().String(), &req.AccountId, configBytes, nil, nil, nil,
)
if err != nil {
return nil, err
}
return accountRecord.Account(a.GetName()), nil
}


@@ -1,94 +1 @@
package cloudintegrations
import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/types"
)
type ServiceSummary struct {
services.Metadata
Config *types.CloudServiceConfig `json:"config"`
}
type ServiceDetails struct {
services.Definition
Config *types.CloudServiceConfig `json:"config"`
ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}
type AccountStatus struct {
Integration AccountIntegrationStatus `json:"integration"`
}
type AccountIntegrationStatus struct {
LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}
type LogsConfig struct {
Enabled bool `json:"enabled"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}
type MetricsConfig struct {
Enabled bool `json:"enabled"`
}
type ServiceConnectionStatus struct {
Logs *SignalConnectionStatus `json:"logs"`
Metrics *SignalConnectionStatus `json:"metrics"`
}
type SignalConnectionStatus struct {
LastReceivedTsMillis int64 `json:"last_received_ts_ms"` // epoch milliseconds
LastReceivedFrom string `json:"last_received_from"` // resource identifier
}
type CompiledCollectionStrategy = services.CollectionStrategy
func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy, error) {
if provider == "aws" {
return &CompiledCollectionStrategy{
Provider: "aws",
AWSMetrics: &services.AWSMetricsStrategy{},
AWSLogs: &services.AWSLogsStrategy{},
}, nil
}
return nil, errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", provider)
}
// Helper for accumulating strategies for enabled services.
func AddServiceStrategy(serviceType string, cs *CompiledCollectionStrategy,
definitionStrat *services.CollectionStrategy, config *types.CloudServiceConfig) error {
if definitionStrat.Provider != cs.Provider {
return errors.NewInternalf(CodeMismatchCloudProvider, "can't add %s service strategy to compiled strategy for %s",
definitionStrat.Provider, cs.Provider)
}
if cs.Provider == "aws" {
if config.Logs != nil && config.Logs.Enabled {
if serviceType == services.S3Sync {
// s3 bucket sync: no cloudwatch log subscriptions are appended for this
// service type, though the definition carries a custom cloudwatch log group
// that is used to compute the logs connection status
cs.S3Buckets = config.Logs.S3Buckets
} else if definitionStrat.AWSLogs != nil { // services that include a logs subscription
cs.AWSLogs.Subscriptions = append(
cs.AWSLogs.Subscriptions,
definitionStrat.AWSLogs.Subscriptions...,
)
}
}
if config.Metrics != nil && config.Metrics.Enabled && definitionStrat.AWSMetrics != nil {
cs.AWSMetrics.StreamFilters = append(
cs.AWSMetrics.StreamFilters,
definitionStrat.AWSMetrics.StreamFilters...,
)
}
return nil
}
return errors.NewNotFoundf(services.CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cs.Provider)
}


@@ -0,0 +1,37 @@
package cloudintegrations
import (
"log/slog"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implawsprovider"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/implazureprovider"
integrationstore "github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/store"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
)
func NewCloudProviderRegistry(
logger *slog.Logger,
store sqlstore.SQLStore,
querier querier.Querier,
) (map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider, error) {
registry := make(map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider)
accountsRepo := integrationstore.NewCloudProviderAccountsRepository(store)
serviceConfigRepo := integrationstore.NewServiceConfigRepository(store)
awsProviderImpl, err := implawsprovider.NewAWSCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
if err != nil {
return nil, err
}
registry[integrationtypes.CloudProviderAWS] = awsProviderImpl
azureProviderImpl, err := implazureprovider.NewAzureCloudProvider(logger, accountsRepo, serviceConfigRepo, querier)
if err != nil {
return nil, err
}
registry[integrationtypes.CloudProviderAzure] = azureProviderImpl
return registry, nil
}
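A minimal sketch of resolving a provider from this registry and invoking it; the logger, sqlStore, and q variables stand in for whatever the caller already has wired up:
registry, err := NewCloudProviderRegistry(logger, sqlStore, q)
if err != nil {
// fail startup: a provider couldn't load its service definitions
}
provider, ok := registry[integrationtypes.CloudProviderAWS]
if !ok {
// unsupported provider: reject the request
}
resp, err := provider.ListServices(ctx, orgID, nil) // nil: no cloud account filter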


@@ -7,6 +7,24 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_ApplicationELB_ConsumedLCUs_count",
"attributes": []
},
{
"key": "aws_ApplicationELB_ProcessedBytes_sum",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics": [
{


@@ -7,6 +7,75 @@
"metrics": true,
"logs": true
},
"ingestion_status_check": {
"metrics": [
{
"category": "rest_api",
"display_name": "REST API Metrics",
"checks": [
{
"key": "aws_ApiGateway_Count_count",
"attributes": [
{
"name": "ApiName",
"operator": "EXISTS",
"value": ""
}
]
}
]
},
{
"category": "http_api",
"display_name": "HTTP API Metrics",
"checks": [
{
"key": "aws_ApiGateway_Count_count",
"attributes": [
{
"name": "ApiId",
"operator": "EXISTS",
"value": ""
}
]
}
]
},
{
"category": "websocket_api",
"display_name": "Websocket API Metrics",
"checks": [
{
"key": "aws_ApiGateway_Count_count",
"attributes": [
{
"name": "ApiId",
"operator": "EXISTS",
"value": ""
}
]
}
]
}
],
"logs": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"attributes": [
{
"name": "aws.cloudwatch.log_group_name",
"operator": "ILIKE",
"value": "API-Gateway%"
}
]
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -148,6 +217,146 @@
"name": "aws_ApiGateway_Latency_sum",
"unit": "Milliseconds",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_4xx_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_5xx_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_sum",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_max",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_min",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_DataProcessed_count",
"unit": "Bytes",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ExecutionError_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ClientError_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_IntegrationError_count",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_sum",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_max",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_min",
"unit": "Count",
"type": "Gauge"
},
{
"name": "aws_ApiGateway_ConnectCount_count",
"unit": "Count",
"type": "Gauge"
}
],
"logs": [


@@ -7,6 +7,24 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_DynamoDB_AccountMaxReads_max",
"attributes": []
},
{
"key": "aws_DynamoDB_AccountProvisionedReadCapacityUtilization_max",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -391,4 +409,4 @@
}
]
}
}
}


@@ -7,6 +7,24 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_EC2_CPUUtilization_max",
"attributes": []
},
{
"key": "aws_EC2_NetworkIn_max",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -515,4 +533,4 @@
}
]
}
}
}


@@ -7,6 +7,81 @@
"metrics": true,
"logs": true
},
"ingestion_status_check": {
"metrics": [
{
"category": "overview",
"display_name": "Overview",
"checks": [
{
"key": "aws_ECS_CPUUtilization_max",
"attributes": []
},
{
"key": "aws_ECS_MemoryUtilization_max",
"attributes": []
}
]
},
{
"category": "containerinsights",
"display_name": "Container Insights",
"checks": [
{
"key": "aws_ECS_ContainerInsights_NetworkRxBytes_max",
"attributes": []
},
{
"key": "aws_ECS_ContainerInsights_StorageReadBytes_max",
"attributes": []
}
]
},
{
"category": "enhanced_containerinsights",
"display_name": "Enhanced Container Insights",
"checks": [
{
"key": "aws_ECS_ContainerInsights_ContainerCpuUtilization_max",
"attributes": [
{
"name": "TaskId",
"operator": "EXISTS",
"value": ""
}
]
},
{
"key": "aws_ECS_ContainerInsights_TaskMemoryUtilization_max",
"attributes": [
{
"name": "TaskId",
"operator": "EXISTS",
"value": ""
}
]
}
]
}
],
"logs": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"attributes": [
{
"name": "aws.cloudwatch.log_group_name",
"operator": "ILIKE",
"value": "%/ecs/%"
}
]
}
]
}
]
},
"data_collected": {
"metrics": [
{


@@ -7,6 +7,20 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_ElastiCache_CacheHitRate_max",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics":[
{
@@ -1928,7 +1942,7 @@
"unit": "Percent",
"type": "Gauge",
"description": ""
}
}
]
},
"telemetry_collection_strategy": {
@@ -1951,4 +1965,4 @@
}
]
}
}
}


@@ -7,6 +7,37 @@
"metrics": true,
"logs": true
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_Lambda_Invocations_sum",
"attributes": []
}
]
}
],
"logs": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"attributes": [
{
"name": "aws.cloudwatch.log_group_name",
"operator": "ILIKE",
"value": "/aws/lambda%"
}
]
}
]
}
]
},
"data_collected": {
"metrics": [
{


@@ -7,6 +7,20 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_Kafka_KafkaDataLogsDiskUsed_max",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -1088,4 +1102,3 @@
]
}
}


@@ -7,6 +7,37 @@
"metrics": true,
"logs": true
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_RDS_CPUUtilization_max",
"attributes": []
}
]
}
],
"logs": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"attributes": [
{
"name": "resources.aws.cloudwatch.log_group_name",
"operator": "ILIKE",
"value": "/aws/rds%"
}
]
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -800,4 +831,4 @@
}
]
}
}
}


@@ -7,6 +7,20 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_SNS_NumberOfMessagesPublished_sum",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -127,4 +141,4 @@
}
]
}
}
}


@@ -7,6 +7,24 @@
"metrics": true,
"logs": false
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "aws_SQS_SentMessageSize_max",
"attributes": []
},
{
"key": "aws_SQS_NumberOfMessagesSent_sum",
"attributes": []
}
]
}
]
},
"data_collected": {
"metrics": [
{
@@ -247,4 +265,4 @@
}
]
}
}
}


@@ -0,0 +1 @@
<svg id="f2f04349-8aee-4413-84c9-a9053611b319" xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="ad4c4f96-09aa-4f91-ba10-5cb8ad530f74" x1="9" y1="15.83" x2="9" y2="5.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b3b3b3" /><stop offset="0.26" stop-color="#c1c1c1" /><stop offset="1" stop-color="#e6e6e6" /></linearGradient></defs><title>Icon-storage-86</title><path d="M.5,5.79h17a0,0,0,0,1,0,0v9.48a.57.57,0,0,1-.57.57H1.07a.57.57,0,0,1-.57-.57V5.79A0,0,0,0,1,.5,5.79Z" fill="url(#ad4c4f96-09aa-4f91-ba10-5cb8ad530f74)" /><path d="M1.07,2.17H16.93a.57.57,0,0,1,.57.57V5.79a0,0,0,0,1,0,0H.5a0,0,0,0,1,0,0V2.73A.57.57,0,0,1,1.07,2.17Z" fill="#37c2b1" /><path d="M2.81,6.89H15.18a.27.27,0,0,1,.26.27v1.4a.27.27,0,0,1-.26.27H2.81a.27.27,0,0,1-.26-.27V7.16A.27.27,0,0,1,2.81,6.89Z" fill="#fff" /><path d="M2.82,9.68H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V10A.27.27,0,0,1,2.82,9.68Z" fill="#37c2b1" /><path d="M2.82,12.5H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V12.77A.27.27,0,0,1,2.82,12.5Z" fill="#258277" /></svg>



@@ -0,0 +1,293 @@
{
"id": "blobstorage",
"title": "Blob Storage",
"icon": "file://icon.svg",
"overview": "file://overview.md",
"supported_signals": {
"metrics": true,
"logs": true
},
"ingestion_status_check": {
"metrics": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"key": "placeholder",
"attributes": []
}
]
},
{
"category": "transactions",
"display_name": "Transactions",
"checks": [
{
"key": "placeholder",
"attributes": []
}
]
}
],
"logs": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"attributes": [
{
"name": "placeholder",
"operator": "ILIKE",
"value": "%/ecs/%"
}
]
}
]
}
]
},
"data_collected": {
"metrics": [
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
}
],
"logs": [
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
}
]
},
"telemetry_collection_strategy": {
"azure_metrics": [
{
"category_type": "metrics",
"name": "Capacity"
},
{
"category_type": "metrics",
"name": "Transaction"
}
],
"azure_logs": [
{
"category_type": "logs",
"name": "StorageRead"
},
{
"category_type": "logs",
"name": "StorageWrite"
},
{
"category_type": "logs",
"name": "StorageDelete"
}
]
},
"assets": {
"dashboards": [
{
"id": "overview",
"title": "Blob Storage Overview",
"description": "Overview of Blob Storage",
"definition": "file://assets/dashboards/overview.json"
}
]
}
}
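The ingestion_status_check blocks across these definition files share a single shape; the Go types below are a hedged sketch of that schema, with names assumed to mirror the JSON keys:
type IngestionStatusCheck struct {
Metrics []CheckCategory `json:"metrics,omitempty"`
Logs []CheckCategory `json:"logs,omitempty"`
}
type CheckCategory struct {
Category string `json:"category"` // "$default" or a named variant such as "transactions"
DisplayName string `json:"display_name"`
Checks []Check `json:"checks"`
}
type Check struct {
Key string `json:"key,omitempty"` // metric key; omitted for log checks
Attributes []CheckAttribute `json:"attributes"`
}
type CheckAttribute struct {
Name string `json:"name"`
Operator string `json:"operator"` // e.g. "EXISTS", "ILIKE"
Value string `json:"value"`
}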


@@ -0,0 +1,2 @@
Monitor Azure Blob Storage with SigNoz
Collect key Blob Storage metrics and view them with an out-of-the-box dashboard.


@@ -0,0 +1 @@
<svg id="f2f04349-8aee-4413-84c9-a9053611b319" xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="ad4c4f96-09aa-4f91-ba10-5cb8ad530f74" x1="9" y1="15.83" x2="9" y2="5.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b3b3b3" /><stop offset="0.26" stop-color="#c1c1c1" /><stop offset="1" stop-color="#e6e6e6" /></linearGradient></defs><title>Icon-storage-86</title><path d="M.5,5.79h17a0,0,0,0,1,0,0v9.48a.57.57,0,0,1-.57.57H1.07a.57.57,0,0,1-.57-.57V5.79A0,0,0,0,1,.5,5.79Z" fill="url(#ad4c4f96-09aa-4f91-ba10-5cb8ad530f74)" /><path d="M1.07,2.17H16.93a.57.57,0,0,1,.57.57V5.79a0,0,0,0,1,0,0H.5a0,0,0,0,1,0,0V2.73A.57.57,0,0,1,1.07,2.17Z" fill="#37c2b1" /><path d="M2.81,6.89H15.18a.27.27,0,0,1,.26.27v1.4a.27.27,0,0,1-.26.27H2.81a.27.27,0,0,1-.26-.27V7.16A.27.27,0,0,1,2.81,6.89Z" fill="#fff" /><path d="M2.82,9.68H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V10A.27.27,0,0,1,2.82,9.68Z" fill="#37c2b1" /><path d="M2.82,12.5H15.19a.27.27,0,0,1,.26.27v1.41a.27.27,0,0,1-.26.27H2.82a.27.27,0,0,1-.26-.27V12.77A.27.27,0,0,1,2.82,12.5Z" fill="#258277" /></svg>



@@ -0,0 +1,289 @@
{
"id": "frontdoor",
"title": "Front Door",
"icon": "file://icon.svg",
"overview": "file://overview.md",
"supported_signals": {
"metrics": true,
"logs": true
},
"ingestion_status_check": {
"metrics": [
{
"category": "overview",
"display_name": "Overview",
"checks": [
{
"key": "placeholder",
"attributes": []
}
]
},
{
"category": "insights",
"display_name": "Blob Storage Insights",
"checks": [
{
"key": "placeholder",
"attributes": []
}
]
}
],
"logs": [
{
"category": "$default",
"display_name": "Default",
"checks": [
{
"attributes": [
{
"name": "placeholder",
"operator": "ILIKE",
"value": "%/ecs/%"
}
]
}
]
}
]
},
"data_collected": {
"metrics": [
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
},
{
"name": "placeholder_metric_1",
"unit": "Percent",
"type": "Gauge",
"description": ""
}
],
"logs": [
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
},
{
"name": "placeholder_log_1",
"path": "placeholder.path.value",
"type": "string"
}
]
},
"telemetry_collection_strategy": {
"azure_metrics": [
{
"category_type": "metrics",
"name": "Capacity"
},
{
"category_type": "metrics",
"name": "Transaction"
}
],
"azure_logs": [
{
"category_type": "logs",
"name": "StorageRead"
},
{
"category_type": "logs",
"name": "StorageWrite"
},
{
"category_type": "logs",
"name": "StorageDelete"
}
]
},
"assets": {
"dashboards": [
{
"id": "overview",
"title": "Front Door Overview",
"description": "Overview of Blob Storage",
"definition": "file://assets/dashboards/overview.json"
}
]
}
}


@@ -0,0 +1,2 @@
Monitor Azure Front Door with SigNoz
Collect key Front Door metrics and view them with an out-of-the-box dashboard.


@@ -1,91 +0,0 @@
package services
import (
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
)
type Metadata struct {
Id string `json:"id"`
Title string `json:"title"`
Icon string `json:"icon"`
}
type Definition struct {
Metadata
Overview string `json:"overview"` // markdown
Assets Assets `json:"assets"`
SupportedSignals SupportedSignals `json:"supported_signals"`
DataCollected DataCollected `json:"data_collected"`
Strategy *CollectionStrategy `json:"telemetry_collection_strategy"`
}
type Assets struct {
Dashboards []Dashboard `json:"dashboards"`
}
type SupportedSignals struct {
Logs bool `json:"logs"`
Metrics bool `json:"metrics"`
}
type DataCollected struct {
Logs []CollectedLogAttribute `json:"logs"`
Metrics []CollectedMetric `json:"metrics"`
}
type CollectedLogAttribute struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"`
}
type CollectedMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Unit string `json:"unit"`
Description string `json:"description"`
}
type CollectionStrategy struct {
Provider string `json:"provider"`
AWSMetrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
AWSLogs *AWSLogsStrategy `json:"aws_logs,omitempty"`
S3Buckets map[string][]string `json:"s3_buckets,omitempty"` // Only available in S3 Sync Service Type
}
type AWSMetricsStrategy struct {
// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
StreamFilters []struct {
// json tags here are in the shape expected by AWS API as detailed at
// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
Namespace string `json:"Namespace"`
MetricNames []string `json:"MetricNames,omitempty"`
} `json:"cloudwatch_metric_stream_filters"`
}
type AWSLogsStrategy struct {
Subscriptions []struct {
// subscribe to all logs groups with specified prefix.
// eg: `/aws/rds/`
LogGroupNamePrefix string `json:"log_group_name_prefix"`
// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
// "" implies no filtering is required.
FilterPattern string `json:"filter_pattern"`
} `json:"cloudwatch_logs_subscriptions"`
}
type Dashboard struct {
Id string `json:"id"`
Url string `json:"url"`
Title string `json:"title"`
Description string `json:"description"`
Image string `json:"image"`
Definition *dashboardtypes.StorableDashboardData `json:"definition,omitempty"`
}


@@ -2,128 +2,111 @@ package services
import (
"bytes"
"context"
"embed"
"encoding/json"
"fmt"
"io/fs"
"path"
"sort"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
koanfJson "github.com/knadh/koanf/parsers/json"
"golang.org/x/exp/maps"
)
const (
S3Sync = "s3sync"
)
var (
CodeUnsupportedCloudProvider = errors.MustNewCode("unsupported_cloud_provider")
CodeUnsupportedServiceType = errors.MustNewCode("unsupported_service_type")
CodeServiceDefinitionNotFound = errors.MustNewCode("service_definition_not_found")
CodeUnsupportedCloudProvider = errors.MustNewCode("unsupported_cloud_provider")
CodeUnsupportedServiceType = errors.MustNewCode("unsupported_service_type")
)
func List(cloudProvider string) ([]Definition, *model.ApiError) {
cloudServices, found := supportedServices[cloudProvider]
if !found || cloudServices == nil {
return nil, model.NotFoundError(fmt.Errorf(
"unsupported cloud provider: %s", cloudProvider,
))
}
services := maps.Values(cloudServices)
sort.Slice(services, func(i, j int) bool {
return services[i].Id < services[j].Id
})
return services, nil
type ServicesProvider[T integrationtypes.Definition] struct {
definitions map[string]T
}
func Map(cloudProvider string) (map[string]Definition, error) {
cloudServices, found := supportedServices[cloudProvider]
if !found || cloudServices == nil {
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
func (a *ServicesProvider[T]) ListServiceDefinitions(ctx context.Context) (map[string]T, error) {
return a.definitions, nil
}
func (a *ServicesProvider[T]) GetServiceDefinition(ctx context.Context, serviceName string) (T, error) {
def, ok := a.definitions[serviceName]
if !ok {
return *new(T), errors.NewNotFoundf(CodeServiceDefinitionNotFound, "service definition not found: %s", serviceName)
}
return def, nil
}
func NewAWSCloudProviderServices() (*ServicesProvider[*integrationtypes.AWSDefinition], error) {
definitions, err := readAllServiceDefinitions(integrationtypes.CloudProviderAWS)
if err != nil {
return nil, err
}
serviceDefinitions := make(map[string]*integrationtypes.AWSDefinition)
for id, def := range definitions {
typedDef, ok := def.(*integrationtypes.AWSDefinition)
if !ok {
return nil, errors.NewInternalf(errors.CodeInternal, "invalid type for AWS service definition %s", id)
}
serviceDefinitions[id] = typedDef
}
return &ServicesProvider[*integrationtypes.AWSDefinition]{
definitions: serviceDefinitions,
}, nil
}
func NewAzureCloudProviderServices() (*ServicesProvider[*integrationtypes.AzureDefinition], error) {
definitions, err := readAllServiceDefinitions(integrationtypes.CloudProviderAzure)
if err != nil {
return nil, err
}
serviceDefinitions := make(map[string]*integrationtypes.AzureDefinition)
for id, def := range definitions {
typedDef, ok := def.(*integrationtypes.AzureDefinition)
if !ok {
return nil, errors.NewInternalf(errors.CodeInternal, "invalid type for Azure service definition %s", id)
}
serviceDefinitions[id] = typedDef
}
return &ServicesProvider[*integrationtypes.AzureDefinition]{
definitions: serviceDefinitions,
}, nil
}
// End of API. Logic for reading service definition files follows
//go:embed definitions/*
var definitionFiles embed.FS
func readAllServiceDefinitions(cloudProvider valuer.String) (map[string]any, error) {
rootDirName := "definitions"
cloudProviderDirPath := path.Join(rootDirName, cloudProvider.String())
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
if err != nil {
return nil, err
}
if len(cloudServices) < 1 {
return nil, errors.NewInternalf(errors.CodeInternal, "no service definitions found in %s", cloudProviderDirPath)
}
return cloudServices, nil
}
func GetServiceDefinition(cloudProvider, serviceType string) (*Definition, error) {
cloudServices := supportedServices[cloudProvider]
if cloudServices == nil {
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedCloudProvider, "unsupported cloud provider: %s", cloudProvider)
}
svc, exists := cloudServices[serviceType]
if !exists {
return nil, errors.Newf(errors.TypeNotFound, CodeUnsupportedServiceType, "%s service not found: %s", cloudProvider, serviceType)
}
return &svc, nil
}
// End of API. Logic for reading service definition files follows
// Service details read from ./serviceDefinitions
// { "providerName": { "service_id": {...}} }
var supportedServices map[string]map[string]Definition
func init() {
err := readAllServiceDefinitions()
if err != nil {
panic(fmt.Errorf(
"couldn't read cloud service definitions: %w", err,
))
}
}
//go:embed definitions/*
var definitionFiles embed.FS
func readAllServiceDefinitions() error {
supportedServices = map[string]map[string]Definition{}
rootDirName := "definitions"
cloudProviderDirs, err := fs.ReadDir(definitionFiles, rootDirName)
if err != nil {
return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
}
for _, d := range cloudProviderDirs {
if !d.IsDir() {
continue
}
cloudProvider := d.Name()
cloudProviderDirPath := path.Join(rootDirName, cloudProvider)
cloudServices, err := readServiceDefinitionsFromDir(cloudProvider, cloudProviderDirPath)
if err != nil {
return fmt.Errorf("couldn't read %s service definitions: %w", cloudProvider, err)
}
if len(cloudServices) < 1 {
return fmt.Errorf("no %s services could be read", cloudProvider)
}
supportedServices[cloudProvider] = cloudServices
}
return nil
}
func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath string) (
map[string]Definition, error,
) {
func readServiceDefinitionsFromDir(cloudProvider valuer.String, cloudProviderDirPath string) (map[string]any, error) {
svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
if err != nil {
return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't list integrations dirs")
}
svcDefs := map[string]Definition{}
svcDefs := make(map[string]any)
for _, d := range svcDefDirs {
if !d.IsDir() {
@@ -133,103 +116,73 @@ func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath st
svcDirPath := path.Join(cloudProviderDirPath, d.Name())
s, err := readServiceDefinition(cloudProvider, svcDirPath)
if err != nil {
return nil, fmt.Errorf("couldn't read svc definition for %s: %w", d.Name(), err)
return nil, err
}
_, exists := svcDefs[s.Id]
_, exists := svcDefs[s.GetId()]
if exists {
return nil, fmt.Errorf(
"duplicate service definition for id %s at %s", s.Id, d.Name(),
)
return nil, errors.NewInternalf(errors.CodeInternal, "duplicate service definition for id %s at %s", s.GetId(), d.Name())
}
svcDefs[s.Id] = *s
svcDefs[s.GetId()] = s
}
return svcDefs, nil
}
func readServiceDefinition(cloudProvider string, svcDirpath string) (*Definition, error) {
func readServiceDefinition(cloudProvider valuer.String, svcDirpath string) (integrationtypes.Definition, error) {
integrationJsonPath := path.Join(svcDirpath, "integration.json")
serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
if err != nil {
return nil, fmt.Errorf(
"couldn't find integration.json in %s: %w",
svcDirpath, err,
)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't read integration definition in %s", svcDirpath)
}
integrationSpec, err := koanfJson.Parser().Unmarshal(serializedSpec)
if err != nil {
return nil, fmt.Errorf(
"couldn't parse integration.json from %s: %w",
integrationJsonPath, err,
)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't parse integration definition in %s", svcDirpath)
}
hydrated, err := integrations.HydrateFileUris(
integrationSpec, definitionFiles, svcDirpath,
)
hydrated, err := integrations.HydrateFileUris(integrationSpec, definitionFiles, svcDirpath)
if err != nil {
return nil, fmt.Errorf(
"couldn't hydrate files referenced in service definition %s: %w",
integrationJsonPath, err,
)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't hydrate integration definition in %s", svcDirpath)
}
hydratedSpec := hydrated.(map[string]any)
serviceDef, err := ParseStructWithJsonTagsFromMap[Definition](hydratedSpec)
if err != nil {
return nil, fmt.Errorf(
"couldn't parse hydrated JSON spec read from %s: %w",
integrationJsonPath, err,
)
var serviceDef integrationtypes.Definition
switch cloudProvider {
case integrationtypes.CloudProviderAWS:
serviceDef = &integrationtypes.AWSDefinition{}
case integrationtypes.CloudProviderAzure:
serviceDef = &integrationtypes.AzureDefinition{}
default:
// this shouldn't happen for embedded definitions, hence an internal error
return nil, errors.NewInternalf(errors.CodeInternal, "unsupported cloud provider: %s", cloudProvider)
}
err = validateServiceDefinition(serviceDef)
err = parseStructWithJsonTagsFromMap(hydratedSpec, serviceDef)
if err != nil {
return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
return nil, err
}
err = serviceDef.Validate()
if err != nil {
return nil, err
}
serviceDef.Strategy.Provider = cloudProvider
return serviceDef, nil
}
func validateServiceDefinition(s *Definition) error {
// Validate dashboard data
seenDashboardIds := map[string]interface{}{}
for _, dd := range s.Assets.Dashboards {
if _, seen := seenDashboardIds[dd.Id]; seen {
return fmt.Errorf("multiple dashboards found with id %s", dd.Id)
}
seenDashboardIds[dd.Id] = nil
}
if s.Strategy == nil {
return fmt.Errorf("telemetry_collection_strategy is required")
}
// potentially more to follow
return nil
}
func ParseStructWithJsonTagsFromMap[StructType any](data map[string]any) (
*StructType, error,
) {
func parseStructWithJsonTagsFromMap(data map[string]any, target interface{}) error {
mapJson, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("couldn't marshal map to json: %w", err)
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't marshal service definition json data")
}
var res StructType
decoder := json.NewDecoder(bytes.NewReader(mapJson))
decoder.DisallowUnknownFields()
err = decoder.Decode(&res)
err = decoder.Decode(target)
if err != nil {
return nil, fmt.Errorf("couldn't unmarshal json back to struct: %w", err)
return errors.WrapInternalf(err, errors.CodeInternal, "couldn't unmarshal service definition json data")
}
return &res, nil
return nil
}
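Round-tripping the hydrated map through json.Marshal and a strict decoder means json tags drive the field mapping and any unknown key in a definition file is a hard error. A self-contained sketch of that behavior (the struct and the "ttile" typo are illustrative):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type sketchDef struct {
	Id    string `json:"id"`
	Title string `json:"title"`
}

func main() {
	raw, _ := json.Marshal(map[string]any{"id": "s3", "ttile": "S3"})
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.DisallowUnknownFields()
	var def sketchDef
	// Prints: json: unknown field "ttile" — a typo in a definition
	// file fails loudly instead of being silently dropped.
	fmt.Println(dec.Decode(&def))
}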


@@ -1,35 +1,3 @@
package services
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/stretchr/testify/require"
)
func TestAvailableServices(t *testing.T) {
require := require.New(t)
// should be able to list available services.
_, apiErr := List("bad-cloud-provider")
require.NotNil(apiErr)
require.Equal(model.ErrorNotFound, apiErr.Type())
awsSvcs, apiErr := List("aws")
require.Nil(apiErr)
require.Greater(len(awsSvcs), 0)
// should be able to get details of a service
_, err := GetServiceDefinition(
"aws", "bad-service-id",
)
require.NotNil(err)
require.True(errors.Ast(err, errors.TypeNotFound))
svc, err := GetServiceDefinition(
"aws", awsSvcs[0].Id,
)
require.Nil(err)
require.Equal(*svc, awsSvcs[0])
}
// TODO: add more tests for services package


@@ -1,55 +1,57 @@
package cloudintegrations
package store
import (
"context"
"database/sql"
"fmt"
"log/slog"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
type cloudProviderAccountsRepository interface {
listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)
var (
CodeCloudIntegrationAccountNotFound errors.Code = errors.MustNewCode("cloud_integration_account_not_found")
)
get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)
type CloudProviderAccountsRepository interface {
ListConnected(ctx context.Context, orgId string, provider string) ([]integrationtypes.CloudIntegration, error)
getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)
Get(ctx context.Context, orgId string, provider string, id string) (*integrationtypes.CloudIntegration, error)
GetConnectedCloudAccount(ctx context.Context, orgId, provider string, accountID string) (*integrationtypes.CloudIntegration, error)
// Insert an account or update it by (cloudProvider, id)
// for specified non-empty fields
upsert(
Upsert(
ctx context.Context,
orgId string,
provider string,
id *string,
config *types.AccountConfig,
config []byte,
accountId *string,
agentReport *types.AgentReport,
agentReport *integrationtypes.AgentReport,
removedAt *time.Time,
) (*types.CloudIntegration, *model.ApiError)
) (*integrationtypes.CloudIntegration, error)
}
func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
*cloudProviderAccountsSQLRepository, error,
) {
return &cloudProviderAccountsSQLRepository{
store: store,
}, nil
func NewCloudProviderAccountsRepository(store sqlstore.SQLStore) CloudProviderAccountsRepository {
return &cloudProviderAccountsSQLRepository{store: store}
}
type cloudProviderAccountsSQLRepository struct {
store sqlstore.SQLStore
}
func (r *cloudProviderAccountsSQLRepository) listConnected(
func (r *cloudProviderAccountsSQLRepository) ListConnected(
ctx context.Context, orgId string, cloudProvider string,
) ([]types.CloudIntegration, *model.ApiError) {
accounts := []types.CloudIntegration{}
) ([]integrationtypes.CloudIntegration, error) {
accounts := []integrationtypes.CloudIntegration{}
err := r.store.BunDB().NewSelect().
Model(&accounts).
@@ -62,18 +64,17 @@ func (r *cloudProviderAccountsSQLRepository) listConnected(
Scan(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not query connected cloud accounts: %w", err,
))
slog.ErrorContext(ctx, "error querying connected cloud accounts", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "could not query connected cloud accounts")
}
return accounts, nil
}
func (r *cloudProviderAccountsSQLRepository) get(
func (r *cloudProviderAccountsSQLRepository) Get(
ctx context.Context, orgId string, provider string, id string,
) (*types.CloudIntegration, *model.ApiError) {
var result types.CloudIntegration
) (*integrationtypes.CloudIntegration, error) {
var result integrationtypes.CloudIntegration
err := r.store.BunDB().NewSelect().
Model(&result).
@@ -82,23 +83,25 @@ func (r *cloudProviderAccountsSQLRepository) get(
Where("id = ?", id).
Scan(ctx)
if err == sql.ErrNoRows {
return nil, model.NotFoundError(fmt.Errorf(
"couldn't find account with Id %s", id,
))
} else if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud provider accounts: %w", err,
))
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(
err,
CodeCloudIntegrationAccountNotFound,
"couldn't find account with Id %s", id,
)
}
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
}
return &result, nil
}
func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
func (r *cloudProviderAccountsSQLRepository) GetConnectedCloudAccount(
ctx context.Context, orgId string, provider string, accountId string,
) (*types.CloudIntegration, *model.ApiError) {
var result types.CloudIntegration
) (*integrationtypes.CloudIntegration, error) {
var result integrationtypes.CloudIntegration
err := r.store.BunDB().NewSelect().
Model(&result).
@@ -109,29 +112,25 @@ func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
Where("removed_at is NULL").
Scan(ctx)
if err == sql.ErrNoRows {
return nil, model.NotFoundError(fmt.Errorf(
"couldn't find connected cloud account %s", accountId,
))
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(err, CodeCloudIntegrationAccountNotFound, "couldn't find connected cloud account %s", accountId)
} else if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud provider accounts: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud provider account")
}
return &result, nil
}
func (r *cloudProviderAccountsSQLRepository) upsert(
func (r *cloudProviderAccountsSQLRepository) Upsert(
ctx context.Context,
orgId string,
provider string,
id *string,
config *types.AccountConfig,
config []byte,
accountId *string,
agentReport *types.AgentReport,
agentReport *integrationtypes.AgentReport,
removedAt *time.Time,
) (*types.CloudIntegration, *model.ApiError) {
) (*integrationtypes.CloudIntegration, error) {
// Insert
if id == nil {
temp := valuer.GenerateUUID().StringValue()
@@ -181,7 +180,7 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
)
}
integration := types.CloudIntegration{
integration := integrationtypes.CloudIntegration{
OrgID: orgId,
Provider: provider,
Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
@@ -189,28 +188,25 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Config: config,
Config: string(config),
AccountID: accountId,
LastAgentReport: agentReport,
RemovedAt: removedAt,
}
_, dbErr := r.store.BunDB().NewInsert().
_, err := r.store.BunDB().NewInsert().
Model(&integration).
On(onConflictClause).
Exec(ctx)
if dbErr != nil {
return nil, model.InternalError(fmt.Errorf(
"could not upsert cloud account record: %w", dbErr,
))
if err != nil {
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud integration account")
}
upsertedAccount, apiErr := r.get(ctx, orgId, provider, *id)
if apiErr != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't fetch upserted account by id: %w", apiErr.ToError(),
))
upsertedAccount, err := r.Get(ctx, orgId, provider, *id)
if err != nil {
slog.ErrorContext(ctx, "error upserting cloud integration account", "error", err)
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't get upserted cloud integration account")
}
return upsertedAccount, nil
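The Upsert path hinges on bun's ON CONFLICT support: a single INSERT covers both create and update, with the conflict clause deciding which columns are overwritten. A reduced sketch of that pattern against a hypothetical table (the column names are illustrative, not the actual integration schema):

package sketch

import (
	"context"

	"github.com/uptrace/bun"
)

type account struct {
	bun.BaseModel `bun:"table:cloud_integration"`

	ID     string `bun:"id,pk"`
	Config string `bun:"config"`
}

// upsertAccount inserts the row, or overwrites config in place when a
// row with the same primary key already exists.
func upsertAccount(ctx context.Context, db *bun.DB, a *account) error {
	_, err := db.NewInsert().
		Model(a).
		On("CONFLICT (id) DO UPDATE").
		Set("config = EXCLUDED.config").
		Exec(ctx)
	return err
}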


@@ -1,64 +1,63 @@
package cloudintegrations
package store
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
)
var (
CodeServiceConfigNotFound = errors.MustNewCode("service_config_not_found")
)
type ServiceConfigDatabase interface {
get(
Get(
ctx context.Context,
orgID string,
cloudAccountId string,
serviceType string,
) (*types.CloudServiceConfig, *model.ApiError)
) ([]byte, error)
upsert(
Upsert(
ctx context.Context,
orgID string,
cloudProvider string,
cloudAccountId string,
serviceId string,
config types.CloudServiceConfig,
) (*types.CloudServiceConfig, *model.ApiError)
config []byte,
) ([]byte, error)
getAllForAccount(
GetAllForAccount(
ctx context.Context,
orgID string,
cloudAccountId string,
) (
configsBySvcId map[string]*types.CloudServiceConfig,
apiErr *model.ApiError,
map[string][]byte,
error,
)
}
func newServiceConfigRepository(store sqlstore.SQLStore) (
*serviceConfigSQLRepository, error,
) {
return &serviceConfigSQLRepository{
store: store,
}, nil
func NewServiceConfigRepository(store sqlstore.SQLStore) ServiceConfigDatabase {
return &serviceConfigSQLRepository{store: store}
}
type serviceConfigSQLRepository struct {
store sqlstore.SQLStore
}
func (r *serviceConfigSQLRepository) get(
func (r *serviceConfigSQLRepository) Get(
ctx context.Context,
orgID string,
cloudAccountId string,
serviceType string,
) (*types.CloudServiceConfig, *model.ApiError) {
var result types.CloudIntegrationService
) ([]byte, error) {
var result integrationtypes.CloudIntegrationService
err := r.store.BunDB().NewSelect().
Model(&result).
@@ -67,36 +66,30 @@ func (r *serviceConfigSQLRepository) get(
Where("ci.id = ?", cloudAccountId).
Where("cis.type = ?", serviceType).
Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(err, CodeServiceConfigNotFound, "couldn't find config for cloud account %s", cloudAccountId)
}
if err == sql.ErrNoRows {
return nil, model.NotFoundError(fmt.Errorf(
"couldn't find config for cloud account %s",
cloudAccountId,
))
} else if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud service config: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud service config")
}
return &result.Config, nil
return []byte(result.Config), nil
}
func (r *serviceConfigSQLRepository) upsert(
func (r *serviceConfigSQLRepository) Upsert(
ctx context.Context,
orgID string,
cloudProvider string,
cloudAccountId string,
serviceId string,
config types.CloudServiceConfig,
) (*types.CloudServiceConfig, *model.ApiError) {
config []byte,
) ([]byte, error) {
// get cloud integration id from account id
// if the account is not connected, we don't need to upsert the config
var cloudIntegrationId string
err := r.store.BunDB().NewSelect().
Model((*types.CloudIntegration)(nil)).
Model((*integrationtypes.CloudIntegration)(nil)).
Column("id").
Where("provider = ?", cloudProvider).
Where("account_id = ?", cloudAccountId).
@@ -104,20 +97,24 @@ func (r *serviceConfigSQLRepository) upsert(
Where("removed_at is NULL").
Where("last_agent_report is not NULL").
Scan(ctx, &cloudIntegrationId)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"couldn't query cloud integration id: %w", err,
))
if errors.Is(err, sql.ErrNoRows) {
return nil, errors.WrapNotFoundf(
err,
CodeCloudIntegrationAccountNotFound,
"couldn't find active cloud integration account",
)
}
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query cloud integration id")
}
serviceConfig := types.CloudIntegrationService{
serviceConfig := integrationtypes.CloudIntegrationService{
Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
TimeAuditable: types.TimeAuditable{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
},
Config: config,
Config: string(config),
Type: serviceId,
CloudIntegrationID: cloudIntegrationId,
}
@@ -126,21 +123,18 @@ func (r *serviceConfigSQLRepository) upsert(
On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
Exec(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not upsert cloud service config: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't upsert cloud service config")
}
return &serviceConfig.Config, nil
return config, nil
}
func (r *serviceConfigSQLRepository) getAllForAccount(
func (r *serviceConfigSQLRepository) GetAllForAccount(
ctx context.Context,
orgID string,
cloudAccountId string,
) (map[string]*types.CloudServiceConfig, *model.ApiError) {
serviceConfigs := []types.CloudIntegrationService{}
) (map[string][]byte, error) {
var serviceConfigs []integrationtypes.CloudIntegrationService
err := r.store.BunDB().NewSelect().
Model(&serviceConfigs).
@@ -149,15 +143,13 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
Where("ci.org_id = ?", orgID).
Scan(ctx)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not query service configs from db: %w", err,
))
return nil, errors.WrapInternalf(err, errors.CodeInternal, "couldn't query service configs from db")
}
result := map[string]*types.CloudServiceConfig{}
result := make(map[string][]byte)
for _, r := range serviceConfigs {
result[r.Type] = &r.Config
result[r.Type] = []byte(r.Config)
}
return result, nil
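With configs stored and returned as raw []byte, the store stays provider-agnostic and each provider unmarshals at the edge against its own schema. A hedged sketch of that decode step (the config shape is hypothetical, not the actual AWS/Azure schema):

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical provider-side config; real schemas live with each
// cloud provider implementation, not in the store.
type svcLogsConfig struct {
	Enabled bool `json:"enabled"`
}

func main() {
	// raw is what ServiceConfigDatabase.Get / GetAllForAccount return.
	raw := []byte(`{"enabled":true}`)
	var cfg svcLogsConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println("logs enabled:", cfg.Enabled)
}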


@@ -6,11 +6,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
"github.com/SigNoz/signoz/pkg/queryparser"
"log/slog"
"io"
"math"
@@ -25,14 +21,19 @@ import (
"time"
"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/errors"
errorsV2 "github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/licensing"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
"github.com/SigNoz/signoz/pkg/modules/thirdpartyapi"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/signoz"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/prometheus/prometheus/promql"
@@ -44,7 +45,6 @@ import (
"github.com/SigNoz/signoz/pkg/contextlinks"
traceFunnelsModule "github.com/SigNoz/signoz/pkg/modules/tracefunnel"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
"github.com/SigNoz/signoz/pkg/query-service/app/logs"
@@ -111,7 +111,7 @@ type APIHandler struct {
IntegrationsController *integrations.Controller
CloudIntegrationsController *cloudintegrations.Controller
cloudIntegrationsRegistry map[integrationtypes.CloudProviderType]integrationtypes.CloudProvider
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
@@ -158,9 +158,6 @@ type APIHandlerOpts struct {
// Integrations
IntegrationsController *integrations.Controller
// Cloud Provider Integrations
CloudIntegrationsController *cloudintegrations.Controller
// Log parsing pipelines
LogsParsingPipelineController *logparsingpipeline.LogParsingPipelineController
@@ -174,6 +171,8 @@ type APIHandlerOpts struct {
QueryParserAPI *queryparser.API
Signoz *signoz.SigNoz
Logger *slog.Logger
}
// NewAPIHandler returns an APIHandler
@@ -209,12 +208,21 @@ func NewAPIHandler(opts APIHandlerOpts, config signoz.Config) (*APIHandler, erro
summaryService := metricsexplorer.NewSummaryService(opts.Reader, opts.RuleManager, opts.Signoz.Modules.Dashboard)
//quickFilterModule := quickfilter.NewAPI(opts.QuickFilterModule)
cloudIntegrationsRegistry, err := cloudintegrations.NewCloudProviderRegistry(
opts.Logger,
opts.Signoz.SQLStore,
opts.Signoz.Querier,
)
if err != nil {
return nil, err
}
aH := &APIHandler{
reader: opts.Reader,
temporalityMap: make(map[string]map[v3.Temporality]bool),
ruleManager: opts.RuleManager,
IntegrationsController: opts.IntegrationsController,
CloudIntegrationsController: opts.CloudIntegrationsController,
cloudIntegrationsRegistry: cloudIntegrationsRegistry,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
querier: querier,
querierV2: querierv2,
@@ -1209,13 +1217,22 @@ func (aH *APIHandler) Get(rw http.ResponseWriter, r *http.Request) {
}
dashboard := new(dashboardtypes.Dashboard)
if aH.CloudIntegrationsController.IsCloudIntegrationDashboardUuid(id) {
cloudIntegrationDashboard, apiErr := aH.CloudIntegrationsController.GetDashboardById(ctx, orgID, id)
if apiErr != nil {
render.Error(rw, errorsV2.Wrapf(apiErr, errorsV2.TypeInternal, errorsV2.CodeInternal, "failed to get dashboard"))
if integrationtypes.IsCloudIntegrationDashboardUuid(id) {
cloudProvider, err := integrationtypes.GetCloudProviderFromDashboardID(id)
if err != nil {
render.Error(rw, err)
return
}
dashboard = cloudIntegrationDashboard
integrationDashboard, err := aH.cloudIntegrationsRegistry[cloudProvider].GetDashboard(ctx, &integrationtypes.GettableDashboard{
ID: id,
OrgID: orgID,
})
if err != nil {
render.Error(rw, err)
return
}
dashboard = integrationDashboard
} else if aH.IntegrationsController.IsInstalledIntegrationDashboardID(id) {
integrationDashboard, apiErr := aH.IntegrationsController.GetInstalledIntegrationDashboardById(ctx, orgID, id)
if apiErr != nil {
@@ -1279,11 +1296,13 @@ func (aH *APIHandler) List(rw http.ResponseWriter, r *http.Request) {
dashboards = append(dashboards, installedIntegrationDashboards...)
}
cloudIntegrationDashboards, apiErr := aH.CloudIntegrationsController.AvailableDashboards(ctx, orgID)
if apiErr != nil {
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
} else {
dashboards = append(dashboards, cloudIntegrationDashboards...)
for _, provider := range aH.cloudIntegrationsRegistry {
cloudIntegrationDashboards, err := provider.GetAvailableDashboards(ctx, orgID)
if err != nil {
zap.L().Error("failed to get dashboards for cloud integrations", zap.Error(apiErr))
} else {
dashboards = append(dashboards, cloudIntegrationDashboards...)
}
}
gettableDashboards, err := dashboardtypes.NewGettableDashboardsFromDashboards(dashboards)
@@ -3259,15 +3278,15 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(w http.ResponseWriter, r *h
lookbackSeconds = 15 * 60
}
connectionStatus, apiErr := aH.calculateConnectionStatus(
connectionStatus, err := aH.calculateConnectionStatus(
r.Context(), orgID, connectionTests, lookbackSeconds,
)
if apiErr != nil {
RespondError(w, apiErr, "Failed to calculate integration connection status")
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, connectionStatus)
render.Success(w, http.StatusOK, connectionStatus)
}
func (aH *APIHandler) calculateConnectionStatus(
@@ -3275,10 +3294,11 @@ func (aH *APIHandler) calculateConnectionStatus(
orgID valuer.UUID,
connectionTests *integrations.IntegrationConnectionTests,
lookbackSeconds int64,
) (*integrations.IntegrationConnectionStatus, *model.ApiError) {
) (*integrations.IntegrationConnectionStatus, error) {
// Calculate connection status for signals in parallel
result := &integrations.IntegrationConnectionStatus{}
// TODO: migrate to errors package
errors := []*model.ApiError{}
var resultLock sync.Mutex
@@ -3476,12 +3496,14 @@ func (aH *APIHandler) UninstallIntegration(w http.ResponseWriter, r *http.Reques
aH.Respond(w, map[string]interface{}{})
}
// cloud provider integrations
// RegisterCloudIntegrationsRoutes register routes for cloud provider integrations
func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *middleware.AuthZ) {
subRouter := router.PathPrefix("/api/v1/cloud-integrations").Subrouter()
subRouter.Use(middleware.NewRecovery(aH.Signoz.Instrumentation.Logger()).Wrap)
subRouter.HandleFunc(
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionUrl),
"/{cloudProvider}/accounts/generate-connection-url", am.EditAccess(aH.CloudIntegrationsGenerateConnectionArtifact),
).Methods(http.MethodPost)
subRouter.HandleFunc(
@@ -3515,170 +3537,199 @@ func (aH *APIHandler) RegisterCloudIntegrationsRoutes(router *mux.Router, am *mi
subRouter.HandleFunc(
"/{cloudProvider}/services/{serviceId}/config", am.EditAccess(aH.CloudIntegrationsUpdateServiceConfig),
).Methods(http.MethodPost)
}
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
r.Context(), claims.OrgID, cloudProvider,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, resp)
}
func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
req := cloudintegrations.GenerateConnectionUrlRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
r.Context(), claims.OrgID, cloudProvider, req,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, result)
}
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
accountId := mux.Vars(r)["accountId"]
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
r.Context(), claims.OrgID, cloudProvider, accountId,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, resp)
}
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
req := cloudintegrations.AgentCheckInRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
result, err := aH.CloudIntegrationsController.CheckInAsAgent(
r.Context(), claims.OrgID, cloudProvider, req,
)
func (aH *APIHandler) CloudIntegrationsGenerateConnectionArtifact(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, result)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
return
}
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GenerateConnectionArtifact(r.Context(), &integrationtypes.PostableConnectionArtifact{
OrgID: claims.OrgID,
Data: reqBody,
})
if err != nil {
aH.Signoz.Instrumentation.Logger().ErrorContext(r.Context(),
"failed to generate connection artifact for cloud integration",
slog.String("cloudProvider", cloudProviderString),
slog.String("orgID", claims.OrgID),
slog.Any("error", err),
)
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListConnectedAccounts(r.Context(), claims.OrgID)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsGetAccountStatus(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
accountId := mux.Vars(r)["accountId"]
req := cloudintegrations.UpdateAccountConfigRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetAccountStatus(r.Context(), claims.OrgID, accountId)
if err != nil {
render.Error(w, err)
return
}
result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
r.Context(), claims.OrgID, cloudProvider, accountId, req,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.Respond(w, result)
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
func (aH *APIHandler) CloudIntegrationsAgentCheckIn(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
req := new(integrationtypes.PostableAgentCheckInPayload)
if err = json.NewDecoder(r.Body).Decode(req); err != nil {
render.Error(w, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "invalid request body"))
return
}
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
req.OrgID = claims.OrgID
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].AgentCheckIn(r.Context(), req)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
accountId := mux.Vars(r)["accountId"]
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err, errors.CodeInternal, "failed to read request body"))
return
}
result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
r.Context(), claims.OrgID, cloudProvider, accountId,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateAccountConfig(r.Context(), &integrationtypes.PatchableAccountConfig{
OrgID: claims.OrgID,
AccountId: accountId,
Data: reqBody,
})
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, result)
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsListServices(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
func (aH *APIHandler) CloudIntegrationsDisconnectAccount(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
accountId := mux.Vars(r)["accountId"]
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
result, err := aH.cloudIntegrationsRegistry[cloudProvider].DisconnectAccount(r.Context(), claims.OrgID, accountId)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, result)
}
func (aH *APIHandler) CloudIntegrationsListServices(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
var cloudAccountId *string
@@ -3687,26 +3738,22 @@ func (aH *APIHandler) CloudIntegrationsListServices(
cloudAccountId = &cloudAccountIdQP
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
resp, apiErr := aH.CloudIntegrationsController.ListServices(
r.Context(), claims.OrgID, cloudProvider, cloudAccountId,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].ListServices(r.Context(), claims.OrgID, cloudAccountId)
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, resp)
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
w http.ResponseWriter, r *http.Request,
) {
func (aH *APIHandler) CloudIntegrationsGetServiceDetails(w http.ResponseWriter, r *http.Request) {
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
@@ -3718,7 +3765,14 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
return
}
cloudProvider := mux.Vars(r)["cloudProvider"]
cloudProviderString := mux.Vars(r)["cloudProvider"]
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
render.Error(w, err)
return
}
serviceId := mux.Vars(r)["serviceId"]
var cloudAccountId *string
@@ -3728,270 +3782,59 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
cloudAccountId = &cloudAccountIdQP
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
resp, err := aH.CloudIntegrationsController.GetServiceDetails(
r.Context(), claims.OrgID, cloudProvider, serviceId, cloudAccountId,
)
resp, err := aH.cloudIntegrationsRegistry[cloudProvider].GetServiceDetails(r.Context(), &integrationtypes.GetServiceDetailsReq{
OrgID: orgID,
ServiceId: serviceId,
CloudAccountID: cloudAccountId,
})
if err != nil {
render.Error(w, err)
return
}
// Add connection status for the two signals (metrics and logs).
if cloudAccountId != nil {
connStatus, apiErr := aH.calculateCloudIntegrationServiceConnectionStatus(
r.Context(), orgID, cloudProvider, *cloudAccountId, resp,
)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
resp.ConnectionStatus = connStatus
}
aH.Respond(w, resp)
render.Success(w, http.StatusOK, resp)
}
func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
ctx context.Context,
orgID valuer.UUID,
cloudProvider string,
cloudAccountId string,
svcDetails *cloudintegrations.ServiceDetails,
) (*cloudintegrations.ServiceConnectionStatus, *model.ApiError) {
if cloudProvider != "aws" {
// TODO(Raj): Make connection check generic for all providers in a follow up change
return nil, model.BadRequest(
fmt.Errorf("unsupported cloud provider: %s", cloudProvider),
)
}
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(w http.ResponseWriter, r *http.Request) {
cloudProviderString := mux.Vars(r)["cloudProvider"]
telemetryCollectionStrategy := svcDetails.Strategy
if telemetryCollectionStrategy == nil {
return nil, model.InternalError(fmt.Errorf(
"service doesn't have telemetry collection strategy: %s", svcDetails.Id,
))
}
result := &cloudintegrations.ServiceConnectionStatus{}
errors := []*model.ApiError{}
var resultLock sync.Mutex
var wg sync.WaitGroup
// Calculate metrics connection status
if telemetryCollectionStrategy.AWSMetrics != nil {
wg.Add(1)
go func() {
defer wg.Done()
metricsConnStatus, apiErr := aH.calculateAWSIntegrationSvcMetricsConnectionStatus(
ctx, cloudAccountId, telemetryCollectionStrategy.AWSMetrics, svcDetails.DataCollected.Metrics,
)
resultLock.Lock()
defer resultLock.Unlock()
if apiErr != nil {
errors = append(errors, apiErr)
} else {
result.Metrics = metricsConnStatus
}
}()
}
// Calculate logs connection status
if telemetryCollectionStrategy.AWSLogs != nil {
wg.Add(1)
go func() {
defer wg.Done()
logsConnStatus, apiErr := aH.calculateAWSIntegrationSvcLogsConnectionStatus(
ctx, orgID, cloudAccountId, telemetryCollectionStrategy.AWSLogs,
)
resultLock.Lock()
defer resultLock.Unlock()
if apiErr != nil {
errors = append(errors, apiErr)
} else {
result.Logs = logsConnStatus
}
}()
}
wg.Wait()
if len(errors) > 0 {
return nil, errors[0]
}
return result, nil
}
func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
ctx context.Context,
cloudAccountId string,
strategy *services.AWSMetricsStrategy,
metricsCollectedBySvc []services.CollectedMetric,
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
if strategy == nil || len(strategy.StreamFilters) < 1 {
return nil, nil
}
expectedLabelValues := map[string]string{
"cloud_provider": "aws",
"cloud_account_id": cloudAccountId,
}
metricsNamespace := strategy.StreamFilters[0].Namespace
metricsNamespaceParts := strings.Split(metricsNamespace, "/")
if len(metricsNamespaceParts) >= 2 {
expectedLabelValues["service_namespace"] = metricsNamespaceParts[0]
expectedLabelValues["service_name"] = metricsNamespaceParts[1]
} else {
// metrics for single word namespaces like "CWAgent" do not
// have the service_namespace label populated
expectedLabelValues["service_name"] = metricsNamespaceParts[0]
}
metricNamesCollectedBySvc := []string{}
for _, cm := range metricsCollectedBySvc {
metricNamesCollectedBySvc = append(metricNamesCollectedBySvc, cm.Name)
}
statusForLastReceivedMetric, apiErr := aH.reader.GetLatestReceivedMetric(
ctx, metricNamesCollectedBySvc, expectedLabelValues,
)
if apiErr != nil {
return nil, apiErr
}
if statusForLastReceivedMetric != nil {
return &cloudintegrations.SignalConnectionStatus{
LastReceivedTsMillis: statusForLastReceivedMetric.LastReceivedTsMillis,
LastReceivedFrom: "signoz-aws-integration",
}, nil
}
return nil, nil
}
func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
ctx context.Context,
orgID valuer.UUID,
cloudAccountId string,
strategy *services.AWSLogsStrategy,
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
if strategy == nil || len(strategy.Subscriptions) < 1 {
return nil, nil
}
logGroupNamePrefix := strategy.Subscriptions[0].LogGroupNamePrefix
if len(logGroupNamePrefix) < 1 {
return nil, nil
}
logsConnTestFilter := &v3.FilterSet{
Operator: "AND",
Items: []v3.FilterItem{
{
Key: v3.AttributeKey{
Key: "cloud.account.id",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: "=",
Value: cloudAccountId,
},
{
Key: v3.AttributeKey{
Key: "aws.cloudwatch.log_group_name",
DataType: v3.AttributeKeyDataTypeString,
Type: v3.AttributeKeyTypeResource,
},
Operator: "like",
Value: logGroupNamePrefix + "%",
},
},
}
// TODO(Raj): Receive this as a param from UI in the future.
lookbackSeconds := int64(30 * 60)
qrParams := &v3.QueryRangeParamsV3{
Start: time.Now().UnixMilli() - (lookbackSeconds * 1000),
End: time.Now().UnixMilli(),
CompositeQuery: &v3.CompositeQuery{
PanelType: v3.PanelTypeList,
QueryType: v3.QueryTypeBuilder,
BuilderQueries: map[string]*v3.BuilderQuery{
"A": {
PageSize: 1,
Filters: logsConnTestFilter,
QueryName: "A",
DataSource: v3.DataSourceLogs,
Expression: "A",
AggregateOperator: v3.AggregateOperatorNoOp,
},
},
},
}
queryRes, _, err := aH.querier.QueryRange(
ctx, orgID, qrParams,
)
cloudProvider, err := integrationtypes.NewCloudProvider(cloudProviderString)
if err != nil {
return nil, model.InternalError(fmt.Errorf(
"could not query for integration connection status: %w", err,
))
}
if len(queryRes) > 0 && queryRes[0].List != nil && len(queryRes[0].List) > 0 {
lastLog := queryRes[0].List[0]
return &cloudintegrations.SignalConnectionStatus{
LastReceivedTsMillis: lastLog.Timestamp.UnixMilli(),
LastReceivedFrom: "signoz-aws-integration",
}, nil
render.Error(w, err)
return
}
return nil, nil
}
func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
w http.ResponseWriter, r *http.Request,
) {
cloudProvider := mux.Vars(r)["cloudProvider"]
serviceId := mux.Vars(r)["serviceId"]
req := cloudintegrations.UpdateServiceConfigRequest{}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
RespondError(w, model.BadRequest(err), nil)
return
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 != nil {
render.Error(w, errv2)
return
}
result, err := aH.CloudIntegrationsController.UpdateServiceConfig(
r.Context(), claims.OrgID, cloudProvider, serviceId, &req,
)
claims, err := authtypes.ClaimsFromContext(r.Context())
if err != nil {
render.Error(w, err)
return
}
aH.Respond(w, result)
reqBody, err := io.ReadAll(r.Body)
if err != nil {
render.Error(w, errors.WrapInternalf(err,
errors.CodeInternal,
"failed to read update service config request body",
))
return
}
result, err := aH.cloudIntegrationsRegistry[cloudProvider].UpdateServiceConfig(
r.Context(), &integrationtypes.UpdatableServiceConfigReq{
OrgID: claims.OrgID,
ServiceId: serviceId,
Config: reqBody,
},
)
if err != nil {
render.Error(w, err)
return
}
render.Success(w, http.StatusOK, result)
}
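A recurring shape in these rewritten handlers: the HTTP layer no longer decodes provider-specific payloads; it forwards raw bytes and the provider parses them against its own schema. A generic sketch of that passthrough (the narrow provider interface here is illustrative, not the actual integrationtypes.CloudProvider contract):

package sketch

import (
	"io"
	"net/http"
)

// Hypothetical narrow view of a registry entry.
type provider interface {
	UpdateServiceConfig(orgID, serviceID string, config []byte) ([]byte, error)
}

func handleUpdate(p provider, w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "failed to read request body", http.StatusInternalServerError)
		return
	}
	// The handler never inspects body; schema validation is the
	// provider's job.
	out, err := p.UpdateServiceConfig("org-123", "svc-1", body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	_, _ = w.Write(out)
}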
// logs


@@ -11,6 +11,7 @@ import (
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
"github.com/SigNoz/signoz/pkg/valuer"
@@ -107,7 +108,7 @@ type IntegrationsListItem struct {
type Integration struct {
IntegrationDetails
Installation *types.InstalledIntegration `json:"installation"`
Installation *integrationtypes.InstalledIntegration `json:"installation"`
}
type Manager struct {
@@ -223,7 +224,7 @@ func (m *Manager) InstallIntegration(
ctx context.Context,
orgId string,
integrationId string,
config types.InstalledIntegrationConfig,
config integrationtypes.InstalledIntegrationConfig,
) (*IntegrationsListItem, *model.ApiError) {
integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
if apiErr != nil {
@@ -429,7 +430,7 @@ func (m *Manager) getInstalledIntegration(
ctx context.Context,
orgId string,
integrationId string,
) (*types.InstalledIntegration, *model.ApiError) {
) (*integrationtypes.InstalledIntegration, *model.ApiError) {
iis, apiErr := m.installedIntegrationsRepo.get(
ctx, orgId, []string{integrationId},
)
@@ -457,7 +458,7 @@ func (m *Manager) getInstalledIntegrations(
return nil, apiErr
}
installedTypes := utils.MapSlice(installations, func(i types.InstalledIntegration) string {
installedTypes := utils.MapSlice(installations, func(i integrationtypes.InstalledIntegration) string {
return i.Type
})
integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)


@@ -4,22 +4,22 @@ import (
"context"
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
)
type InstalledIntegrationsRepo interface {
list(ctx context.Context, orgId string) ([]types.InstalledIntegration, *model.ApiError)
list(ctx context.Context, orgId string) ([]integrationtypes.InstalledIntegration, *model.ApiError)
get(
ctx context.Context, orgId string, integrationTypes []string,
) (map[string]types.InstalledIntegration, *model.ApiError)
) (map[string]integrationtypes.InstalledIntegration, *model.ApiError)
upsert(
ctx context.Context,
orgId string,
integrationType string,
config types.InstalledIntegrationConfig,
) (*types.InstalledIntegration, *model.ApiError)
config integrationtypes.InstalledIntegrationConfig,
) (*integrationtypes.InstalledIntegration, *model.ApiError)
delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
}


@@ -7,6 +7,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/model"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types"
"github.com/SigNoz/signoz/pkg/types/integrationtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/uptrace/bun"
)
@@ -26,8 +27,8 @@ func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
func (r *InstalledIntegrationsSqliteRepo) list(
ctx context.Context,
orgId string,
) ([]types.InstalledIntegration, *model.ApiError) {
integrations := []types.InstalledIntegration{}
) ([]integrationtypes.InstalledIntegration, *model.ApiError) {
integrations := []integrationtypes.InstalledIntegration{}
err := r.store.BunDB().NewSelect().
Model(&integrations).
@@ -44,8 +45,8 @@ func (r *InstalledIntegrationsSqliteRepo) list(
func (r *InstalledIntegrationsSqliteRepo) get(
ctx context.Context, orgId string, integrationTypes []string,
) (map[string]types.InstalledIntegration, *model.ApiError) {
integrations := []types.InstalledIntegration{}
) (map[string]integrationtypes.InstalledIntegration, *model.ApiError) {
integrations := []integrationtypes.InstalledIntegration{}
typeValues := []interface{}{}
for _, integrationType := range integrationTypes {
@@ -62,7 +63,7 @@ func (r *InstalledIntegrationsSqliteRepo) get(
))
}
result := map[string]types.InstalledIntegration{}
result := map[string]integrationtypes.InstalledIntegration{}
for _, ii := range integrations {
result[ii.Type] = ii
}
@@ -74,10 +75,10 @@ func (r *InstalledIntegrationsSqliteRepo) upsert(
ctx context.Context,
orgId string,
integrationType string,
config types.InstalledIntegrationConfig,
) (*types.InstalledIntegration, *model.ApiError) {
config integrationtypes.InstalledIntegrationConfig,
) (*integrationtypes.InstalledIntegration, *model.ApiError) {
integration := types.InstalledIntegration{
integration := integrationtypes.InstalledIntegration{
Identifiable: types.Identifiable{
ID: valuer.GenerateUUID(),
},
@@ -114,7 +115,7 @@ func (r *InstalledIntegrationsSqliteRepo) delete(
ctx context.Context, orgId string, integrationType string,
) *model.ApiError {
_, dbErr := r.store.BunDB().NewDelete().
Model(&types.InstalledIntegration{}).
Model(&integrationtypes.InstalledIntegration{}).
Where("type = ?", integrationType).
Where("org_id = ?", orgId).
Exec(ctx)


@@ -25,7 +25,6 @@ import (
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/query-service/agentConf"
"github.com/SigNoz/signoz/pkg/query-service/app/clickhouseReader"
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations"
"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline"
"github.com/SigNoz/signoz/pkg/query-service/app/opamp"
@@ -70,11 +69,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
return nil, err
}
cloudIntegrationsController, err := cloudintegrations.NewController(signoz.SQLStore)
if err != nil {
return nil, err
}
cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
Provider: "memory",
Memory: cache.Memory{
@@ -126,13 +120,13 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
Reader: reader,
RuleManager: rm,
IntegrationsController: integrationsController,
CloudIntegrationsController: cloudIntegrationsController,
LogsParsingPipelineController: logParsingPipelineController,
FluxInterval: config.Querier.FluxInterval,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
LicensingAPI: nooplicensing.NewLicenseAPI(),
Signoz: signoz,
QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
Logger: signoz.Instrumentation.Logger(),
}, config)
if err != nil {
return nil, err


@@ -0,0 +1,13 @@
package utils
import (
"runtime/debug"
)
func RecoverPanic(callback func(err interface{}, stack []byte)) {
if r := recover(); r != nil {
if callback != nil {
callback(r, debug.Stack())
}
}
}
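recover only intercepts panics in the goroutine where it was deferred, so RecoverPanic has to be deferred inside each goroutine it should guard. A minimal usage sketch (the callback body is illustrative):

package main

import (
	"fmt"
	"runtime/debug"
)

// Mirrors the RecoverPanic helper above.
func RecoverPanic(callback func(err interface{}, stack []byte)) {
	if r := recover(); r != nil {
		if callback != nil {
			callback(r, debug.Stack())
		}
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// Must be deferred inside the goroutine that may panic.
		defer RecoverPanic(func(err interface{}, stack []byte) {
			fmt.Printf("recovered: %v\n%s", err, stack)
		})
		panic("boom")
	}()
	<-done
}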


@@ -11,11 +11,8 @@ import (
"github.com/SigNoz/signoz/pkg/alertmanager/signozalertmanager"
"github.com/SigNoz/signoz/pkg/emailing/emailingtest"
"github.com/SigNoz/signoz/pkg/factory/factorytest"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
"github.com/SigNoz/signoz/pkg/querier"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/sharder"
@@ -44,13 +41,7 @@ func TestNewHandlers(t *testing.T) {
queryParser := queryparser.New(providerSettings)
require.NoError(t, err)
dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), providerSettings, nil, orgGetter, queryParser)
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
require.NoError(t, err)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule)
querierHandler := querier.NewHandler(providerSettings, nil, nil)
handlers := NewHandlers(modules, providerSettings, nil, querierHandler, nil, nil, nil, nil, nil, nil, nil)


@@ -85,11 +85,11 @@ func NewModules(
queryParser queryparser.QueryParser,
config Config,
dashboard dashboard.Module,
userGetter user.Getter,
) Modules {
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
orgSetter := implorganization.NewSetter(implorganization.NewStore(sqlstore), alertmanager, quickfilter)
user := impluser.NewModule(impluser.NewStore(sqlstore, providerSettings), tokenizer, emailing, providerSettings, orgSetter, authz, analytics, config.User)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings))
ruleStore := sqlrulestore.NewRuleStore(sqlstore, queryParser, providerSettings)
return Modules{


@@ -11,11 +11,8 @@ import (
"github.com/SigNoz/signoz/pkg/alertmanager/signozalertmanager"
"github.com/SigNoz/signoz/pkg/emailing/emailingtest"
"github.com/SigNoz/signoz/pkg/factory/factorytest"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
"github.com/SigNoz/signoz/pkg/queryparser"
"github.com/SigNoz/signoz/pkg/sharder"
"github.com/SigNoz/signoz/pkg/sharder/noopsharder"
@@ -43,13 +40,7 @@ func TestNewModules(t *testing.T) {
queryParser := queryparser.New(providerSettings)
require.NoError(t, err)
dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), providerSettings, nil, orgGetter, queryParser)
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
require.NoError(t, err)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule)
reflectVal := reflect.ValueOf(modules)
for i := 0; i < reflectVal.NumField(); i++ {


@@ -169,7 +169,6 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewAddAnonymousPublicDashboardTransactionFactory(sqlstore),
sqlmigration.NewAddRootUserFactory(sqlstore, sqlschema),
sqlmigration.NewAddUserEmailOrgIDIndexFactory(sqlstore, sqlschema),
sqlmigration.NewMigrateRulesV4ToV5Factory(sqlstore, telemetryStore),
)
}


@@ -1,13 +1,11 @@
package signoz
import (
"context"
"testing"
"github.com/DATA-DOG/go-sqlmock"
"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager/nfmanagertest"
"github.com/SigNoz/signoz/pkg/analytics"
"github.com/SigNoz/signoz/pkg/flagger"
"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
"github.com/SigNoz/signoz/pkg/modules/user/impluser"
@@ -77,12 +75,7 @@ func TestNewProviderFactories(t *testing.T) {
})
assert.NotPanics(t, func() {
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
if err != nil {
panic(err)
}
userGetter := impluser.NewGetter(impluser.NewStore(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual), instrumentationtest.New().ToProviderSettings()), flagger)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual), instrumentationtest.New().ToProviderSettings()))
orgGetter := implorganization.NewGetter(implorganization.NewStore(sqlstoretest.New(sqlstore.Config{Provider: "sqlite"}, sqlmock.QueryMatcherEqual)), nil)
telemetryStore := telemetrystoretest.New(telemetrystore.Config{Provider: "clickhouse"}, sqlmock.QueryMatcherEqual)
NewStatsReporterProviderFactories(telemetryStore, []statsreporter.StatsCollector{}, orgGetter, userGetter, tokenizertest.NewMockTokenizer(t), version.Build{}, analytics.Config{Enabled: true})


@@ -280,7 +280,7 @@ func New(
}
// Initialize user getter
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), flagger)
userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings))
licensingProviderFactory := licenseProviderFactory(sqlstore, zeus, orgGetter, analytics)
licensing, err := licensingProviderFactory.New(
@@ -388,7 +388,7 @@ func New(
}
// Initialize all modules
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter)
modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard)
userService := impluser.NewService(providerSettings, impluser.NewStore(sqlstore, providerSettings), modules.User, orgGetter, authz, config.User.Root)


@@ -1,209 +0,0 @@
package sqlmigration
import (
"context"
"database/sql"
"encoding/json"
"log/slog"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/telemetrystore"
"github.com/SigNoz/signoz/pkg/transition"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
type migrateRulesV4ToV5 struct {
store sqlstore.SQLStore
telemetryStore telemetrystore.TelemetryStore
logger *slog.Logger
}
func NewMigrateRulesV4ToV5Factory(
store sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(
factory.MustNewName("migrate_rules_post_deprecation"),
func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return &migrateRulesV4ToV5{
store: store,
telemetryStore: telemetryStore,
logger: ps.Logger,
}, nil
})
}
func (migration *migrateRulesV4ToV5) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}
return nil
}
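// getLogDuplicateKeys returns attribute names that exist in both the logs
// attribute-key and resource-key tables, i.e. keys that are ambiguous
// between attribute and resource context; they are handed to the v5 alert
// migrator in Up below.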
func (migration *migrateRulesV4ToV5) getLogDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT name
FROM (
SELECT DISTINCT name FROM signoz_logs.distributed_logs_attribute_keys
INTERSECT
SELECT DISTINCT name FROM signoz_logs.distributed_logs_resource_keys
)
ORDER BY name
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query log duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan log duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
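// getTraceDuplicateKeys returns span attribute keys recorded under more
// than one tagType ('tag' and 'resource'), the trace-side equivalent of
// the ambiguous log keys above.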
func (migration *migrateRulesV4ToV5) getTraceDuplicateKeys(ctx context.Context) ([]string, error) {
query := `
SELECT tagKey
FROM signoz_traces.distributed_span_attributes_keys
WHERE tagType IN ('tag', 'resource')
GROUP BY tagKey
HAVING COUNT(DISTINCT tagType) > 1
ORDER BY tagKey
`
rows, err := migration.telemetryStore.ClickhouseDB().Query(ctx, query)
if err != nil {
migration.logger.WarnContext(ctx, "failed to query trace duplicate keys", "error", err)
return nil, nil
}
defer rows.Close()
var keys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err != nil {
migration.logger.WarnContext(ctx, "failed to scan trace duplicate key", "error", err)
continue
}
keys = append(keys, key)
}
return keys, nil
}
func (migration *migrateRulesV4ToV5) Up(ctx context.Context, db *bun.DB) error {
logsKeys, err := migration.getLogDuplicateKeys(ctx)
if err != nil {
return err
}
tracesKeys, err := migration.getTraceDuplicateKeys(ctx)
if err != nil {
return err
}
tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
var rules []struct {
ID string `bun:"id"`
Data map[string]any `bun:"data"`
}
err = tx.NewSelect().
Table("rule").
Column("id", "data").
Scan(ctx, &rules)
if err != nil {
if err == sql.ErrNoRows {
return nil
}
return err
}
alertsMigrator := transition.NewAlertMigrateV5(migration.logger, logsKeys, tracesKeys)
count := 0
for _, rule := range rules {
version, _ := rule.Data["version"].(string)
if version == "v5" {
continue
}
if version == "" {
migration.logger.WarnContext(ctx, "unexpected empty version for rule", "rule_id", rule.ID)
}
migration.logger.InfoContext(ctx, "migrating rule v4 to v5", "rule_id", rule.ID, "current_version", version)
// Check if the queries envelope already exists and is non-empty
hasQueriesEnvelope := false
if condition, ok := rule.Data["condition"].(map[string]any); ok {
if compositeQuery, ok := condition["compositeQuery"].(map[string]any); ok {
if queries, ok := compositeQuery["queries"].([]any); ok && len(queries) > 0 {
hasQueriesEnvelope = true
}
}
}
if hasQueriesEnvelope {
// already has queries envelope, just bump version
// this is because user made a mistake of choosing version
migration.logger.InfoContext(ctx, "rule already has queries envelope, bumping version", "rule_id", rule.ID)
rule.Data["version"] = "v5"
} else {
// old format, run full migration
migration.logger.InfoContext(ctx, "rule has old format, running full migration", "rule_id", rule.ID)
updated := alertsMigrator.Migrate(ctx, rule.Data)
if !updated {
migration.logger.WarnContext(ctx, "expected updated to be true but got false", "rule_id", rule.ID)
continue
}
rule.Data["version"] = "v5"
}
dataJSON, err := json.Marshal(rule.Data)
if err != nil {
return err
}
_, err = tx.NewUpdate().
Table("rule").
Set("data = ?", string(dataJSON)).
Where("id = ?", rule.ID).
Exec(ctx)
if err != nil {
return err
}
count++
}
if count != 0 {
migration.logger.InfoContext(ctx, "migrate v4 alerts", "count", count)
}
return tx.Commit()
}
func (migration *migrateRulesV4ToV5) Down(ctx context.Context, db *bun.DB) error {
return nil
}
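For reference, the envelope check in Up above can be read as a standalone predicate. A minimal, self-contained sketch follows; the helper name and the sample payload are ours for illustration, not part of the migration:

package main

import "fmt"

// hasQueriesEnvelope mirrors the nested type assertions in Up: a rule is
// considered already-migrated when condition.compositeQuery.queries is a
// non-empty list.
func hasQueriesEnvelope(data map[string]any) bool {
	condition, ok := data["condition"].(map[string]any)
	if !ok {
		return false
	}
	compositeQuery, ok := condition["compositeQuery"].(map[string]any)
	if !ok {
		return false
	}
	queries, ok := compositeQuery["queries"].([]any)
	return ok && len(queries) > 0
}

func main() {
	rule := map[string]any{
		"version": "v4",
		"condition": map[string]any{
			"compositeQuery": map[string]any{
				"queries": []any{map[string]any{}},
			},
		},
	}
	// A rule like this only needs its version bumped to "v5"; rules without
	// the envelope go through the full alertsMigrator.Migrate path.
	fmt.Println(hasQueriesEnvelope(rule)) // true
}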

View File

@@ -1,7 +1,6 @@
package authtypes
import (
"encoding"
"encoding/json"
"regexp"
@@ -11,10 +10,8 @@ import (
var (
nameRegex = regexp.MustCompile("^[a-z-]{1,50}$")
- _ json.Marshaler = new(Name)
- _ json.Unmarshaler = new(Name)
- _ encoding.TextMarshaler = new(Name)
- _ encoding.TextUnmarshaler = new(Name)
+ _ json.Marshaler = new(Name)
+ _ json.Unmarshaler = new(Name)
)
type Name struct {
@@ -61,16 +58,3 @@ func (name *Name) UnmarshalJSON(data []byte) error {
*name = shadow
return nil
}
- func (name Name) MarshalText() ([]byte, error) {
- return []byte(name.val), nil
- }
- func (name *Name) UnmarshalText(text []byte) error {
- shadow, err := NewName(string(text))
- if err != nil {
- return err
- }
- *name = shadow
- return nil
- }
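Dropping MarshalText/UnmarshalText is not purely cosmetic: encoding/json consults encoding.TextMarshaler when a type is used as a JSON map key, so after this change a map keyed by Name can no longer be marshaled directly. A minimal sketch with a simplified stand-in type (not the real authtypes.Name):

package main

import (
	"encoding/json"
	"fmt"
)

// name is a stand-in with the same shape as authtypes.Name.
type name struct{ val string }

// With this method present, name satisfies encoding.TextMarshaler and can
// serve as a JSON map key; remove it and json.Marshal below fails with an
// unsupported-type error for the map key.
func (n name) MarshalText() ([]byte, error) { return []byte(n.val), nil }

func main() {
	m := map[name]int{{val: "dashboards"}: 1}
	out, err := json.Marshal(m)
	fmt.Println(string(out), err) // {"dashboards":1} <nil>
}

The same trade-off applies to the Selector type further below, which loses its Text methods in the same way.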

View File

@@ -1,177 +0,0 @@
package authtypes
import (
"encoding/json"
"slices"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
)
type Resource struct {
Name Name `json:"name" required:"true"`
Type Type `json:"type" required:"true"`
}
type GettableResources struct {
Resources []*Resource `json:"resources" required:"true" nullable:"false"`
Relations map[Relation][]Type `json:"relations" required:"true"`
}
type Object struct {
Resource Resource `json:"resource" required:"true"`
Selector Selector `json:"selector" required:"true"`
}
type GettableObjects struct {
Resource Resource `json:"resource" required:"true"`
Selectors []Selector `json:"selectors" required:"true" nullable:"false"`
}
type PatchableObjects struct {
Additions []*GettableObjects `json:"additions" required:"true" nullable:"true"`
Deletions []*GettableObjects `json:"deletions" required:"true" nullable:"true"`
}
func NewObject(resource Resource, selector Selector) (*Object, error) {
err := IsValidSelector(resource.Type, selector.String())
if err != nil {
return nil, err
}
return &Object{Resource: resource, Selector: selector}, nil
}
func NewObjectsFromGettableObjects(patchableObjects []*GettableObjects) ([]*Object, error) {
objects := make([]*Object, 0)
for _, patchObject := range patchableObjects {
for _, selector := range patchObject.Selectors {
object, err := NewObject(patchObject.Resource, selector)
if err != nil {
return nil, err
}
objects = append(objects, object)
}
}
return objects, nil
}
func NewPatchableObjects(additions []*GettableObjects, deletions []*GettableObjects, relation Relation) ([]*Object, []*Object, error) {
if len(additions) == 0 && len(deletions) == 0 {
return nil, nil, errors.New(errors.TypeInvalidInput, ErrCodeInvalidPatchObject, "empty object patch request received, at least one of additions or deletions must be present")
}
for _, object := range additions {
if !slices.Contains(TypeableRelations[object.Resource.Type], relation) {
return nil, nil, errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidRelation, "relation %s is invalid for type %s", relation.StringValue(), object.Resource.Type.StringValue())
}
}
for _, object := range deletions {
if !slices.Contains(TypeableRelations[object.Resource.Type], relation) {
return nil, nil, errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidRelation, "relation %s is invalid for type %s", relation.StringValue(), object.Resource.Type.StringValue())
}
}
additionObjects, err := NewObjectsFromGettableObjects(additions)
if err != nil {
return nil, nil, err
}
deletionsObjects, err := NewObjectsFromGettableObjects(deletions)
if err != nil {
return nil, nil, err
}
return additionObjects, deletionsObjects, nil
}
func NewGettableResources(resources []*Resource) *GettableResources {
return &GettableResources{
Resources: resources,
Relations: RelationsTypeable,
}
}
func NewGettableObjects(objects []*Object) []*GettableObjects {
grouped := make(map[Resource][]Selector)
for _, obj := range objects {
key := obj.Resource
if _, ok := grouped[key]; !ok {
grouped[key] = make([]Selector, 0)
}
grouped[key] = append(grouped[key], obj.Selector)
}
gettableObjects := make([]*GettableObjects, 0, len(grouped))
for resource, selectors := range grouped {
gettableObjects = append(gettableObjects, &GettableObjects{
Resource: resource,
Selectors: selectors,
})
}
return gettableObjects
}
func MustNewObject(resource Resource, selector Selector) *Object {
object, err := NewObject(resource, selector)
if err != nil {
panic(err)
}
return object
}
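// MustNewObjectFromString expects a four-segment, '/'-separated input whose
// first segment has the form "<type>:<...>"; the third and fourth segments
// supply the name and selector, while the second segment and the text after
// ':' are validated for shape but not consumed.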
func MustNewObjectFromString(input string) *Object {
parts := strings.Split(input, "/")
if len(parts) != 4 {
panic(errors.Newf(errors.TypeInternal, errors.CodeInternal, "invalid input format: %s", input))
}
typeParts := strings.Split(parts[0], ":")
if len(typeParts) != 2 {
panic(errors.Newf(errors.TypeInternal, errors.CodeInternal, "invalid type format: %s", parts[0]))
}
resource := Resource{
Type: MustNewType(typeParts[0]),
Name: MustNewName(parts[2]),
}
selector := MustNewSelector(resource.Type, parts[3])
return &Object{Resource: resource, Selector: selector}
}
func MustNewObjectsFromStringSlice(input []string) []*Object {
objects := make([]*Object, 0, len(input))
for _, str := range input {
objects = append(objects, MustNewObjectFromString(str))
}
return objects
}
func (object *Object) UnmarshalJSON(data []byte) error {
var shadow = struct {
Resource Resource
Selector Selector
}{}
err := json.Unmarshal(data, &shadow)
if err != nil {
return err
}
obj, err := NewObject(shadow.Resource, shadow.Selector)
if err != nil {
return err
}
*object = *obj
return nil
}

View File

@@ -7,7 +7,6 @@ import (
var (
ErrCodeAuthZInvalidRelation = errors.MustNewCode("authz_invalid_relation")
- ErrCodeInvalidPatchObject = errors.MustNewCode("authz_invalid_patch_objects")
)
var (
@@ -27,14 +26,6 @@ var TypeableRelations = map[Type][]Relation{
TypeMetaResources: {RelationCreate, RelationList},
}
- var RelationsTypeable = map[Relation][]Type{
- RelationCreate: {TypeMetaResources},
- RelationRead: {TypeUser, TypeRole, TypeOrganization, TypeMetaResource},
- RelationList: {TypeMetaResources},
- RelationUpdate: {TypeUser, TypeRole, TypeOrganization, TypeMetaResource},
- RelationDelete: {TypeUser, TypeRole, TypeOrganization, TypeMetaResource},
- }
type Relation struct{ valuer.String }
func NewRelation(relation string) (Relation, error) {

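The RelationsTypeable map removed above appears to be the inverse view of TypeableRelations (relation → types rather than type → relations). When both views are needed, one can be derived from the other instead of being maintained by hand; a generic sketch, with the function name and sample data ours:

package main

import "fmt"

// invert turns a type->relations map into a relation->types map, the same
// relationship TypeableRelations and RelationsTypeable have to each other.
func invert[K, V comparable](in map[K][]V) map[V][]K {
	out := make(map[V][]K)
	for k, vs := range in {
		for _, v := range vs {
			out[v] = append(out[v], k)
		}
	}
	return out
}

func main() {
	typeable := map[string][]string{
		"meta_resources": {"create", "list"},
		"user":           {"read", "update", "delete"},
	}
	fmt.Println(invert(typeable))
}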
View File

@@ -1,7 +1,6 @@
package authtypes
import (
"encoding"
"encoding/json"
"net/http"
"regexp"
@@ -16,10 +15,8 @@ var (
)
var (
- _ json.Marshaler = new(Selector)
- _ json.Unmarshaler = new(Selector)
- _ encoding.TextMarshaler = new(Selector)
- _ encoding.TextUnmarshaler = new(Selector)
+ _ json.Marshaler = new(Selector)
+ _ json.Unmarshaler = new(Selector)
)
var (
@@ -82,15 +79,6 @@ func (typed *Selector) UnmarshalJSON(data []byte) error {
return nil
}
- func (selector Selector) MarshalText() ([]byte, error) {
- return []byte(selector.val), nil
- }
- func (selector *Selector) UnmarshalText(text []byte) error {
- *selector = Selector{val: string(text)}
- return nil
- }
func IsValidSelector(typed Type, selector string) error {
switch typed {
case TypeUser:

Some files were not shown because too many files have changed in this diff.